Python多線程豆瓣影評API接口爬蟲

爬蟲庫

使用簡單的requests庫,這是一個阻塞的庫,速度比較慢。
解析使用XPATH表達式
整體採用類的形式

多線程

使用concurrent.futures併發模塊,創建線程池,把future對象扔進去執行便可實現併發爬取效果

數據存儲

使用Python ORM sqlalchemy保存到數據庫,也能夠使用自帶的csv模塊存在CSV中。

API接口

由於API接口存在數據保護狀況,一個電影的每個分類只能抓取前25頁,所有評論、好評、中評、差評全部分類能爬100頁,每頁有20個數據,即最多爲兩千條數據。

由於時效性緣由,不保證代碼能爬到數據,只是給你們一個參考思路,上代碼

import csv
import random
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime

from lxml import etree
import pymysql
import requests

from models import create_session, Comments

#隨機UA
USERAGENT = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12'
]


class CommentFetcher:
    headers = {'User-Agent': ''}
    cookie = ''
    cookies = {'cookie': cookie}
    # cookie爲登陸後的cookie,須要自行復制
    base_node = '//div[@class="comment-item"]'


    def __init__(self, movie_id, start, type=''):
        '''
        :type: 所有評論:'', 好評:h 中評:m 差評:l
        :movie_id: 影片的ID號
        :start: 開始的記錄數,0-480
        '''
        self.movie_id = movie_id
        self.start = start
        self.type = type
        self.url = 'https://movie.douban.com/subject/{id}/comments?start={start}&limit=20&sort=new_score\&status=P&percent_type={type}&comments_only=1'.format(
            id=str(self.movie_id),
            start=str(self.start),
            type=self.type
        )
        #建立數據庫鏈接
        self.session = create_session()

    #隨機useragent
    def _random_UA(self):
        self.headers['User-Agent'] = random.choice(USERAGENT)


    #獲取api接口,使用get方法,返回的數據爲json數據,須要提取裏面的HTML
    def _get(self):
        self._random_UA()
        res = ''
        try:
            res = requests.get(self.url, cookies=self.cookies, headers=self.headers)
            res = res.json()['html']
        except Exception as e:
            print('IP被封,請使用代理IP')
        print('正在獲取{} 開始的記錄'.format(self.start))
        return res

    def _parse(self):
        res = self._get()
        dom = etree.HTML(res)

        #id號
        self.id = dom.xpath(self.base_node + '/@data-cid')
        #用戶名
        self.username = dom.xpath(self.base_node + '/div[@class="avatar"]/a/@title')
        #用戶鏈接
        self.user_center = dom.xpath(self.base_node + '/div[@class="avatar"]/a/@href')
        #點贊數
        self.vote = dom.xpath(self.base_node + '//span[@class="votes"]/text()')
        #星級
        self.star = dom.xpath(self.base_node + '//span[contains(@class,"rating")]/@title')
        #發表時間
        self.time = dom.xpath(self.base_node + '//span[@class="comment-time "]/@title')
        #評論內容 全部span標籤class名爲short的節點文本
        self.content = dom.xpath(self.base_node + '//span[@class="short"]/text()')

    #保存到數據庫
    def save_to_database(self):
        self._parse()
        for i in range(len(self.id)):
            try:
                comment = Comments(
                    id=int(self.id[i]),
                    username=self.username[i],
                    user_center=self.user_center[i],
                    vote=int(self.vote[i]),
                    star=self.star[i],
                    time=datetime.strptime(self.time[i], '%Y-%m-%d %H:%M:%S'),
                    content=self.content[i]
                )

                self.session.add(comment)
                self.session.commit()
                return 'finish'


            except pymysql.err.IntegrityError as e:
                print('數據重複,不作任何處理')

            except Exception as e:
                #數據添加錯誤,回滾
                self.session.rollback()

            finally:
                #關閉數據庫鏈接
                self.session.close()

    #保存到csv
    def save_to_csv(self):
        self._parse()
        f = open('comment.csv', 'w', encoding='utf-8')
        csv_in = csv.writer(f, dialect='excel')
        for i in range(len(self.id)):
            csv_in.writerow([
                int(self.id[i]),
                self.username[i],
                self.user_center[i],
                int(self.vote[i]),
                self.time[i],
                self.content[i]
            ])
        f.close()


if __name__ == '__main__':
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = []
        for i in ['', 'h', 'm', 'l']:
            for j in range(25):
                fetcher = CommentFetcher(movie_id=26266893, start=j * 20, type=i)
                futures.append(executor.submit(fetcher.save_to_csv))

        for f in as_completed(futures):
            try:
                res = f.done()
                if res:
                    ret_data = f.result()
                    if ret_data == 'finish':
                        print('{} 成功保存數據'.format(str(f)))
            except Exception as e:
                f.cancel()
相關文章
相關標籤/搜索