#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Dang Kai
# @Date: 2018-08-14 16:28:18
# @Last Modified time: 2018-08-15 08:57:44
# @E-mail: 1370465454@qq.com
# @Description: add multiprocessing (crawl the board pages in parallel)
# http://maoyan.com/board/4
# http://maoyan.com/board/4?offset=20
import requests
import re
import json
from multiprocessing import Pool
from requests.exceptions import RequestException

def get_one_page(url, headers):
    '''Fetch the HTML of a single board page.'''
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        else:
            return None
    except RequestException:  # network errors, timeouts, etc.
        return None
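
# Illustrative call (not executed here): fetch the first page of the Top 100
# board. Maoyan generally rejects requests without a browser-like User-Agent,
# which is why the headers argument is required.
# html = get_one_page('http://maoyan.com/board/4?offset=0',
#                     {'user-agent': 'Mozilla/5.0'})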

def parse_one_page(html):
    '''Extract the wanted fields from one page of HTML with a regex.'''
    # Capture groups: 1 ranking, 2 poster image URL, 3 title, 4 star list,
    # 5 release time, 6 integer part of the score, 7 fractional part.
    pattern = re.compile(r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
                         + r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                         + r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    # print(items)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],      # drop the leading '主演：' label
            'starttime': item[4].strip()[5:],  # drop the leading '上映时间：' label
            'score': item[5] + item[6]         # join the two halves of the score
        }

def write_to_file(content):
    '''Append one record to result.txt as a line of JSON.'''
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
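
# A minimal sketch for reading the results back, assuming result.txt was
# written by write_to_file above (one JSON object per line). This helper is
# illustrative and not part of the original crawl flow.
def read_results(path='result.txt'):
    '''Load every record from the result file into a list of dicts.'''
    with open(path, encoding='utf-8') as f:
        return [json.loads(line) for line in f]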

def main(offset):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'}
    html = get_one_page(
        'http://maoyan.com/board/4?offset=' + str(offset), headers)
    if html is None:  # request failed or was blocked; skip this page
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)

if __name__ == '__main__':
    # Single-process alternative:
    # for i in range(10):
    #     main(i * 10)
    pool = Pool()  # defaults to one worker process per CPU core
    pool.map(main, [i * 10 for i in range(10)])  # offsets 0, 10, ..., 90
    pool.close()
    pool.join()
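
# Usage sketch: run this file directly (e.g. `python <this_script>.py`) to crawl
# the ten pages of http://maoyan.com/board/4 in parallel and append one JSON
# record per movie to result.txt.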