黑马程序员 Technical Exchange Community
Title: [Xi'an Campus] Automating Youdao translation with a Python crawler
Author: 逆风TO
Date: 2019-5-16 16:00
Automated Youdao translation of English-language literature:
Code 1:
import urllib.request
import urllib.parse
import json

# Query the Youdao web translation endpoint with a Python crawler
def fanyi(context=''):
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    data = {}
    data['type'] = 'AUTO'
    data['i'] = context
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['ue'] = 'UTF-8'
    data['typoResult'] = 'true'
    head = {}
    head['Referer'] = 'http://fanyi.youdao.com/?keyfrom=dict2.top'
    head['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
    data = urllib.parse.urlencode(data).encode('utf-8')
    req = urllib.request.Request(url, data, head)
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8')
    html = json.loads(html)
    return html['translateResult'][0][0]['tgt']

# Read the source document and translate it sentence by sentence
f_r = open('./翻译/origin.txt', 'r', encoding='utf-8')
f_w = open('./翻译/result.txt', 'a', encoding='utf-8')
context = f_r.read()
context_list = context.split('。')
for sent in context_list:
    if not sent.strip():
        continue  # skip the empty tail that split() leaves after the final '。'
    sen_str = sent + '。'
    fanyi_sen_str = fanyi(sen_str)
    f_w.write(fanyi_sen_str + '\n')
f_r.close()
f_w.close()
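A note on the response shape: the free web endpoint nests its output as translateResult[group][segment]['tgt'], one entry per sentence it detects, so fanyi() above only returns the first segment. That is fine here because the caller already splits on '。', but if you ever feed it a whole paragraph at once, a small helper like the sketch below joins every segment (the name join_segments is mine, and the shape is assumed from the observed JSON, not an official contract):

def join_segments(parsed):
    # 'parsed' is the dict produced by json.loads(html) in fanyi() above.
    # Assumed shape: {'translateResult': [[{'src': ..., 'tgt': ...}], ...]}
    return ''.join(seg['tgt']
                   for group in parsed['translateResult']
                   for seg in group)

Inside fanyi(), the last line would then become: return join_segments(html)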
Code 2:
import urllib.request
import urllib.parse
import json
import time

# Interactive loop; delay each request to avoid hammering the server
while True:
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    content = input('Enter the text to translate (enter "q" to quit): ')
    if content == 'q':
        break
    data = {}
    data['type'] = 'AUTO'
    data['i'] = content
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['ue'] = 'UTF-8'
    data['typoResult'] = 'true'
    data = urllib.parse.urlencode(data).encode('utf-8')
    req = urllib.request.Request(url, data)
    req.add_header('Referer', 'http://fanyi.youdao.com/?keyfrom=dict2.top')
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8')
    html = json.loads(html)
    print('Translation: %s' % html['translateResult'][0][0]['tgt'])
    time.sleep(5)  # wait 5 seconds before accepting the next query
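The fixed time.sleep(5) works, but requests fired at an exact 5-second rhythm are easy to fingerprint, and a transient network error kills the loop. One possible refinement, sketched below under my own assumptions (the helper name, retry count, and delay values are all mine, not part of the original code):

import random
import time
import urllib.error
import urllib.request

def urlopen_with_retry(req, tries=3, base_delay=5.0):
    # Retry a request a few times, backing off longer on each attempt
    # and adding random jitter so the timing looks less mechanical.
    for attempt in range(tries):
        try:
            return urllib.request.urlopen(req)
        except urllib.error.URLError:
            if attempt == tries - 1:
                raise  # give up after the last attempt
            time.sleep(base_delay * (attempt + 1) + random.uniform(0, 2))

Dropping it into the loop above means replacing the urllib.request.urlopen(req) call with response = urlopen_with_retry(req), while keeping the trailing sleep as the politeness delay between queries.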
Code 4:
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
import re

# Keyword lookup on Baidu Baike; re-encoding the result URLs fixes the
# urllib.request.urlopen(url) failure on links containing Chinese characters
def main():
    keyword = input('Enter a keyword: ')
    keyword = urllib.parse.urlencode({'word': keyword})
    response = urllib.request.urlopen('https://baike.baidu.com/search/word?%s' % keyword)
    html = response.read()
    soup = BeautifulSoup(html, 'html.parser')
    for each in soup.find_all(href=re.compile('item')):
        content = ''.join([each.text])
        url2 = ''.join(['https://baike.baidu.com', each['href']])
        url2_origin = url2
        # Re-encode the URL: percent-encode non-ASCII characters while
        # keeping the scheme and query delimiters intact
        url2 = urllib.parse.quote(url2, safe=':/=?#')
        response2 = urllib.request.urlopen(url2)
        html2 = response2.read()
        soup2 = BeautifulSoup(html2, 'html.parser')
        if soup2.h2:
            content = ''.join([content, soup2.h2.text])
        content = ''.join([content, '->', url2, '|||||->', url2_origin])
        print(content)
        print('***************************************************')

if __name__ == '__main__':
    main()
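Why the quote() call is the whole fix: urlopen() builds an ASCII request line, so a Baike link whose path still contains raw Chinese characters raises UnicodeEncodeError. Passing safe=':/=?#' tells quote() to leave the URL delimiters alone and percent-encode everything else. A minimal demonstration (the 机器学习 link is illustrative, not taken from an actual crawl):

import urllib.parse

url = 'https://baike.baidu.com/item/机器学习'
# Percent-encode the non-ASCII path characters, keep the delimiters
print(urllib.parse.quote(url, safe=':/=?#'))
# -> https://baike.baidu.com/item/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0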