Fork me on GitHub

多进程抓取猫眼电影TOP100

用到技能点:

  1. 正则表达式
  2. 多进程和进程池
  3. urllib.request模块

开始尝试了requests发现有点问题,一直返回一个错误的页面,说我被禁止访问,但仔细看了发现,并没有封我IP,我猜测可能是猫眼的反爬虫策略。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import json
from multiprocessing import Pool
import re
import urllib.request
def get_one_page(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    A browser-like ``User-Agent`` header is sent because the target site
    (maoyan.com) appears to reject requests carrying a default client
    signature — the author notes above that plain ``requests`` calls were
    answered with a "forbidden" page even though the IP was not banned.

    :param url: the page URL to download.
    :returns: the page HTML as a ``str``.
    :raises urllib.error.URLError: on network failure or HTTP error.
    """
    request = urllib.request.Request(
        url,
        headers={
            # Impersonate a regular browser; presumably this is what gets
            # past the site's anti-scraping check — confirm against the site.
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
        },
    )
    # Context manager guarantees the underlying connection is closed.
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')
def parse_one_page(html):
    """Yield one dict per movie entry found in the board page *html*.

    The page lists each movie inside a ``<dd>`` element; a single regex
    captures, in order: rank, cover image URL, title, star line, release
    line, and the two halves of the score.

    :param html: full HTML text of one TOP100 board page.
    :returns: generator of dicts with keys
        排名 / 封面 / 电影名 / 主演 / 上映时间 / 评分 (all values are ``str``).
    """
    # Raw strings so '\d' is a regex escape, not a (deprecated) str escape;
    # re.S lets '.' span the newlines inside each <dd> block.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>',
        re.S)
    for item in pattern.findall(html):
        yield {
            '排名': item[0],
            '封面': item[1],
            '电影名': item[2],
            '主演': item[3].strip()[3:],      # drop the leading "主演:" label
            '上映时间': item[4].strip()[5:],  # drop the leading "上映时间:" label
            '评分': item[5] + item[6],        # integer part + fractional part
        }
def write_to_file(content):
    """Append *content* to ``result.txt`` as one JSON line.

    ``ensure_ascii=False`` keeps the Chinese text readable in the file.
    The ``with`` statement already closes the file; the original body's
    extra ``f.close()`` inside the block was redundant and has been removed.

    :param content: any JSON-serializable object (here: one movie dict).
    """
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
    """Scrape one TOP100 board page and persist every movie found on it.

    Each page holds 10 movies; *offset* selects the page
    (0, 10, 20, ... 90 cover the full TOP100).

    :param offset: the ``offset`` query parameter of the board URL.
    """
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    for item in parse_one_page(html):
        print(item)           # progress feedback on the console
        write_to_file(item)   # append to result.txt
if __name__ == '__main__':
    # Fan the 10 page offsets (0, 10, ..., 90) out over a process pool;
    # Pool() defaults to one worker per CPU core.
    pool = Pool()
    pool.map(main, [i * 10 for i in range(10)])
    pool.close()  # no more tasks will be submitted
    pool.join()   # wait for all workers to finish before exiting