# _*_ coding:utf-8 _*_
import json
import time
# Threading library for the worker threads.
import threading
from Queue import Queue, Empty

import requests
from lxml import etree

# Exit flags polled by the worker threads.
CRAWL_EXIT = False
PARSE_EXIT = False
# Serializes writes to the shared output file across the parse threads.
FILE_LOCK = threading.Lock()


class ThreadCrawl(threading.Thread):
    """Fetches pages from pageQueue and pushes the raw HTML onto dataQueue."""

    def __init__(self, threadName, pageQueue, dataQueue):
        # Call the parent class initializer.
        super(ThreadCrawl, self).__init__()
        # Thread name.
        self.threadName = threadName
        # Queue of page numbers to fetch.
        self.pageQueue = pageQueue
        # Queue of raw HTML waiting to be parsed.
        self.dataQueue = dataQueue
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}

    def run(self):
        print 'Starting ' + self.threadName
        while not CRAWL_EXIT:
            try:
                # Pop one page number, FIFO. The optional block argument
                # defaults to True:
                # 1. If the queue is empty and block is True, get() does not
                #    return; it blocks until new data arrives.
                # 2. If the queue is empty and block is False, get() raises
                #    a Queue.Empty exception.
                page = self.pageQueue.get(False)
            except Empty:
                continue
            url = 'https://www.qiushibaike.com/8hr/page/' + str(page) + '/'
            try:
                r = requests.get(url, headers=self.headers)
                self.dataQueue.put(r.content)
            except requests.RequestException as e:
                print 'Request failed for %s: %s' % (url, e)
        print 'Exiting ' + self.threadName


class ThreadParse(threading.Thread):
    """Pulls raw HTML off dataQueue, extracts each post, and writes JSON lines."""

    def __init__(self, threadName, dataQueue, filename):
        super(ThreadParse, self).__init__()
        self.threadName = threadName
        self.dataQueue = dataQueue
        # An already-open file object shared by all parse threads.
        self.filename = filename

    def run(self):
        print 'Starting ' + self.threadName
        while not PARSE_EXIT:
            try:
                html = self.dataQueue.get(False)
            except Empty:
                continue
            try:
                self.parse1(html)
            except Exception as e:
                # A malformed page should not kill the whole thread.
                print 'Parse error in %s: %s' % (self.threadName, e)
        print 'Exiting ' + self.threadName

    def parse1(self, html):
        html = etree.HTML(html)
        node_list = html.xpath('//div[contains(@id, "qiushi_tag")]')
        for node in node_list:
            # xpath() returns a list with a single match here, so take it
            # out by index. Username:
            username = node.xpath('./div/a/h2')[0].text
            # Image links (may be empty if the post has no image).
            image = node.xpath('.//div[@class="thumb"]//@src')
            # Post content.
            content = node.xpath('.//div[@class="content"]/span')[0].text
            # Upvote count.
            zan = node.xpath('.//i')[0].text
            # Comment count.
            comment = node.xpath('.//i')[1].text
            items = {
                'username': username,
                'image': image,
                'content': content,
                'zan': zan,
                'comment': comment
            }
            # One JSON object per line; the lock keeps concurrent writes
            # from different parse threads from interleaving.
            with FILE_LOCK:
                self.filename.write(json.dumps(items, ensure_ascii=False).encode('utf-8') + '\n')


def main():
    # Queue of page numbers, i.e. 10 pages.
    pageQueue = Queue(10)
    # Enqueue the numbers 1 to 10, FIFO.
    for page in range(1, 11):
        pageQueue.put(page)
    # Queue for the fetched pages; no argument means unbounded.
    dataQueue = Queue()
    filename = open('duanzi.json', 'a')
    # Names of the three crawl threads.
    crawlList = ['Crawl thread 1', 'Crawl thread 2', 'Crawl thread 3']
    # Keep references to the crawl threads so they can be joined later.
    threadcrawl = []
    for threadName in crawlList:
        thread = ThreadCrawl(threadName, pageQueue, dataQueue)
        thread.start()
        threadcrawl.append(thread)
    # Names of the three parse threads.
    parseList = ['Parse thread 1', 'Parse thread 2', 'Parse thread 3']
    # Keep references to the parse threads so they can be joined later.
    threadparse = []
    for threadName in parseList:
        thread = ThreadParse(threadName, dataQueue, filename)
        thread.start()
        threadparse.append(thread)

    # Wait until every page number has been taken off pageQueue, i.e. all
    # crawl work has been handed out.
    while not pageQueue.empty():
        time.sleep(0.1)
    # pageQueue is empty, so let the crawl threads exit their loops...
    global CRAWL_EXIT
    CRAWL_EXIT = True
    print 'pageQueue is empty'
    # ...and join them before draining dataQueue, so no crawl thread can
    # still be mid-request and push data after the parsers have stopped.
    for thread in threadcrawl:
        thread.join()

    # Wait for the parse threads to drain everything the crawlers produced.
    while not dataQueue.empty():
        time.sleep(0.1)
    global PARSE_EXIT
    PARSE_EXIT = True
    for thread in threadparse:
        thread.join()

    filename.close()
    print 'Done'


if __name__ == '__main__':
    main()
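
# ----------------------------------------------------------------------------
# Alternative shutdown sketch (not part of the crawler above, never called).
# The polling loops in main() still rely on global flags and sleep/poll
# cycles. The standard-library pattern below avoids both: each worker calls
# task_done() per item, the producer blocks on Queue.join(), and one None
# "poison pill" per worker signals shutdown. This is a minimal sketch under
# those assumptions; drain_worker, run_with_join, and NUM_WORKERS are
# illustrative names, not identifiers from the original script.
# ----------------------------------------------------------------------------

NUM_WORKERS = 3


def drain_worker(q):
    # Block until an item arrives; exit when the poison pill shows up.
    while True:
        item = q.get()
        if item is None:
            q.task_done()
            break
        # ... handle the item here ...
        q.task_done()


def run_with_join(items):
    q = Queue()
    workers = [threading.Thread(target=drain_worker, args=(q,))
               for _ in range(NUM_WORKERS)]
    for w in workers:
        w.start()
    for item in items:
        q.put(item)
    # One pill per worker so every thread sees exactly one.
    for _ in workers:
        q.put(None)
    # Blocks until task_done() has been called for every item and pill.
    q.join()
    for w in workers:
        w.join()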