Web Crawlers

Performance

When writing a crawler, the main performance cost is in I/O: with a single process and a single thread, every URL request inevitably blocks while waiting for the response, which slows down the crawl as a whole.

import requests


def fetch_async(url):
    response = requests.get(url)
    return response


url_list = ['http://www.github.com', 'http://www.bing.com']

for url in url_list:
    fetch_async(url)
1. Synchronous execution
from concurrent.futures import ThreadPoolExecutor
import requests


def fetch_async(url):
    response = requests.get(url)
    return response


url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ThreadPoolExecutor(5)
for url in url_list:
    pool.submit(fetch_async, url)
pool.shutdown(wait=True)
2. Multi-threaded execution
from concurrent.futures import ThreadPoolExecutor
import requests


def fetch_async(url):
    response = requests.get(url)
    return response


def callback(future):
    print(future.result())


url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ThreadPoolExecutor(5)
for url in url_list:
    v = pool.submit(fetch_async, url)
    v.add_done_callback(callback)
pool.shutdown(wait=True)
2. Multi-threaded execution + callback
from concurrent.futures import ProcessPoolExecutor
import requests


def fetch_async(url):
    response = requests.get(url)
    return response


url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ProcessPoolExecutor(5)
for url in url_list:
    pool.submit(fetch_async, url)
pool.shutdown(wait=True)
3. Multi-process execution
from concurrent.futures import ProcessPoolExecutor
import requests


def fetch_async(url):
    response = requests.get(url)
    return response


def callback(future):
    print(future.result())


url_list = ['http://www.github.com', 'http://www.bing.com']
pool = ProcessPoolExecutor(5)
for url in url_list:
    v = pool.submit(fetch_async, url)
    v.add_done_callback(callback)
pool.shutdown(wait=True)
3. Multi-process execution + callback

All of the code above improves request performance. The drawback of multi-threading and multi-processing is that threads and processes sit idle while blocked on I/O, so asynchronous I/O is the preferred choice:

import asyncio


@asyncio.coroutine
def func1():
    print('before...func1......')
    yield from asyncio.sleep(5)
    print('end...func1......')


tasks = [func1(), func1()]

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
1. asyncio example 1
import asyncio


@asyncio.coroutine
def fetch_async(host, url='/'):
    print(host, url)
    reader, writer = yield from asyncio.open_connection(host, 80)

    request_header_content = """GET %s HTTP/1.0\r\nHost: %s\r\n\r\n""" % (url, host,)
    request_header_content = bytes(request_header_content, encoding='utf-8')

    writer.write(request_header_content)
    yield from writer.drain()
    text = yield from reader.read()
    print(host, url, text)
    writer.close()

tasks = [
    fetch_async('www.cnblogs.com', '/wupeiqi/'),
    fetch_async('dig.chouti.com', '/pic/show?nid=4073644713430508&lid=10273091')
]

loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
1. asyncio example 2
import aiohttp
import asyncio


@asyncio.coroutine
def fetch_async(url):
    print(url)
    response = yield from aiohttp.request('GET', url)
    # data = yield from response.read()
    # print(url, data)
    print(url, response)
    response.close()


tasks = [fetch_async('http://www.google.com/'), fetch_async('http://www.chouti.com/')]

event_loop = asyncio.get_event_loop()
results = event_loop.run_until_complete(asyncio.gather(*tasks))
event_loop.close()
2. asyncio + aiohttp
import asyncio
import requests


@asyncio.coroutine
def fetch_async(func, *args):
    loop = asyncio.get_event_loop()
    future = loop.run_in_executor(None, func, *args)
    response = yield from future
    print(response.url, response.content)


tasks = [
    fetch_async(requests.get, 'http://www.cnblogs.com/wupeiqi/'),
    fetch_async(requests.get, 'http://dig.chouti.com/pic/show?nid=4073644713430508&lid=10273091')
]

loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
3. asyncio + requests
import gevent

import requests
from gevent import monkey

monkey.patch_all()


def fetch_async(method, url, req_kwargs):
    print(method, url, req_kwargs)
    response = requests.request(method=method, url=url, **req_kwargs)
    print(response.url, response.content)

# ##### Send the requests #####
gevent.joinall([
    gevent.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
    gevent.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
    gevent.spawn(fetch_async, method='get', url='https://github.com/', req_kwargs={}),
])

# ##### Send the requests (use a pool to cap the number of greenlets) #####
# from gevent.pool import Pool
# pool = Pool(None)
# gevent.joinall([
#     pool.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
#     pool.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
#     pool.spawn(fetch_async, method='get', url='https://www.github.com/', req_kwargs={}),
# ])
4. gevent + requests
import grequests


request_list = [
    grequests.get('http://httpbin.org/delay/1', timeout=0.001),
    grequests.get('http://fakedomain/'),
    grequests.get('http://httpbin.org/status/500')
]


# ##### Execute and collect the list of responses #####
# response_list = grequests.map(request_list)
# print(response_list)


# ##### Execute and collect the list of responses (with exception handling) #####
# def exception_handler(request, exception):
#     print(request, exception)
#     print("Request failed")

# response_list = grequests.map(request_list, exception_handler=exception_handler)
# print(response_list)
5. grequests
from twisted.web.client import getPage
from twisted.internet import reactor

REV_COUNTER = 0
REQ_COUNTER = 0

def callback(contents):
    print(contents,)

    global REV_COUNTER
    REV_COUNTER += 1
    if REV_COUNTER == REQ_COUNTER:
        reactor.stop()


url_list = ['http://www.bing.com', 'http://www.baidu.com', ]
REQ_COUNTER = len(url_list)
for url in url_list:
    deferred = getPage(bytes(url, encoding='utf8'))
    deferred.addCallback(callback)
reactor.run()
6. Twisted example 1
from twisted.web.client import getPage
from twisted.internet import reactor


class TwistedRequest(object):
    def __init__(self):
        self.__req_counter = 0
        self.__rev_counter = 0

    def __execute(self, content, url, callback):
        if callback:
            callback(url, content)
        self.__rev_counter += 1
        if self.__rev_counter == self.__req_counter:
            reactor.stop()

    def fetch_url(self, url_callback_list):

        self.__req_counter = len(url_callback_list)

        for item in url_callback_list:
            url = item['url']
            success_callback = item['success_callback']
            error_callback = item['error_callback']

            deferred = getPage(bytes(url, encoding='utf8'))
            deferred.addCallback(self.__execute, url, success_callback)
            deferred.addErrback(self.__execute, url, error_callback)

        reactor.run()


def callback(url, content):
    print(url, content)


def error(url, content):
    print(url, content)


obj = TwistedRequest()
obj.fetch_url([
    {'url': 'http://www.baidu.com', 'success_callback': callback, 'error_callback': error},
    {'url': 'http://www.google.com', 'success_callback': callback, 'error_callback': error},
])
6. Twisted example 2
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPRequest
from tornado import ioloop


def handle_response(response):
    if response.error:
        print("Error:", response.error)
    else:
        print(response.body)
        # Same approach as the Twisted example: stop the loop once done
        # ioloop.IOLoop.current().stop()


def func():
    url_list = [
        'http://www.google.com',
        'http://127.0.0.1:8000/test2/',
    ]
    for url in url_list:
        print(url)
        http_client = AsyncHTTPClient()
        http_client.fetch(HTTPRequest(url), handle_response)


ioloop.IOLoop.current().add_callback(func)
ioloop.IOLoop.current().start()
7. tornado

All of the above are asynchronous I/O request modules, either built into Python or provided by third parties; they are simple to use and greatly improve efficiency. Under the hood, asynchronous I/O requests come down to [non-blocking sockets] + [I/O multiplexing]:

import select
import socket
import time


class AsyncTimeoutException(TimeoutError):
    """
    Exception raised when a request times out.
    """

    def __init__(self, msg):
        self.msg = msg
        super(AsyncTimeoutException, self).__init__(msg)


class HttpContext(object):
    """Wraps the basic data of a request and its response."""

    def __init__(self, sock, host, port, method, url, data, callback, timeout=5):
        """
        sock: client socket object for the request
        host: host name to request
        port: port to request
        method: HTTP method
        url: URL to request
        data: data in the request body
        callback: callback to run when the request completes
        timeout: request timeout in seconds
        """
        self.sock = sock
        self.callback = callback
        self.host = host
        self.port = port
        self.method = method
        self.url = url
        self.data = data

        self.timeout = timeout

        self.__start_time = time.time()
        self.__buffer = []

    def is_timeout(self):
        """Whether the current request has already timed out."""
        current_time = time.time()
        if (self.__start_time + self.timeout) < current_time:
            return True

    def fileno(self):
        """File descriptor of the request socket, used by select."""
        return self.sock.fileno()

    def write(self, data):
        """Append a chunk of the response to the buffer."""
        self.__buffer.append(data)

    def finish(self, exc=None):
        """The response buffer is complete; run the request's callback."""
        if not exc:
            response = b''.join(self.__buffer)
            self.callback(self, response, exc)
        else:
            self.callback(self, None, exc)

    def send_request_data(self):
        content = """%s %s HTTP/1.0\r\nHost: %s\r\n\r\n%s""" % (
            self.method.upper(), self.url, self.host, self.data,)

        return content.encode(encoding='utf8')


class AsyncRequest(object):
    def __init__(self):
        self.fds = []
        self.connections = []

    def add_request(self, host, port, method, url, data, callback, timeout):
        """Create a new request."""
        client = socket.socket()
        client.setblocking(False)
        try:
            client.connect((host, port))
        except BlockingIOError as e:
            pass
            # print('The connection request has been sent to the remote host')
        req = HttpContext(client, host, port, method, url, data, callback, timeout)
        self.connections.append(req)
        self.fds.append(req)

    def check_conn_timeout(self):
        """Check all requests and terminate any that have already timed out."""
        timeout_list = []
        for context in self.connections:
            if context.is_timeout():
                timeout_list.append(context)
        for context in timeout_list:
            context.finish(AsyncTimeoutException('Request timed out'))
            self.fds.remove(context)
            self.connections.remove(context)

    def running(self):
        """Event loop: check whether request sockets are ready and act on them."""
        while True:
            r, w, e = select.select(self.fds, self.connections, self.fds, 0.05)

            if not self.fds:
                return

            for context in r:
                sock = context.sock
                while True:
                    try:
                        data = sock.recv(8096)
                        if not data:
                            self.fds.remove(context)
                            context.finish()
                            break
                        else:
                            context.write(data)
                    except BlockingIOError as e:
                        break
                    except TimeoutError as e:
                        self.fds.remove(context)
                        self.connections.remove(context)
                        context.finish(e)
                        break

            for context in w:
                # Connected to the remote server; start sending the request data
                if context in self.fds:
                    data = context.send_request_data()
                    context.sock.sendall(data)
                    self.connections.remove(context)

            self.check_conn_timeout()


if __name__ == '__main__':
    def callback_func(context, response, ex):
        """
        :param context: HttpContext object wrapping the request information
        :param response: response body of the request
        :param ex: exception, if any (the exception object on failure, otherwise None)
        :return:
        """
        print(context, response, ex)

    obj = AsyncRequest()
    url_list = [
        {'host': 'www.google.com', 'port': 80, 'method': 'GET', 'url': '/', 'data': '', 'timeout': 5,
         'callback': callback_func},
        {'host': 'www.baidu.com', 'port': 80, 'method': 'GET', 'url': '/', 'data': '', 'timeout': 5,
         'callback': callback_func},
        {'host': 'www.bing.com', 'port': 80, 'method': 'GET', 'url': '/', 'data': '', 'timeout': 5,
         'callback': callback_func},
    ]
    for item in url_list:
        print(item)
        obj.add_request(**item)

    obj.running()
The most hard-core asynchronous I/O module (hand-written)

Scrapy

Scrapy is an application framework written for crawling web sites and extracting structured data. It can be used in a wide range of programs such as data mining, information processing, and archiving historical data.
It was originally designed for page scraping (more precisely, web scraping), but it can also be used to fetch data returned by APIs (for example Amazon Associates Web Services) or as a general-purpose web crawler. Scrapy has broad uses: data mining, monitoring, and automated testing.

Scrapy uses the Twisted asynchronous networking library to handle network communication. The overall architecture is roughly as follows:

[Scrapy architecture diagram]

Scrapy mainly consists of the following components:

  • Engine (Scrapy)
    Handles the data flow of the whole system and triggers events (the core of the framework)
  • Scheduler
    Accepts requests sent by the engine, pushes them onto a queue, and returns them when the engine asks again. It can be thought of as a priority queue of URLs (the addresses of the pages to crawl): it decides which URL to crawl next and removes duplicate URLs
  • Downloader
    Downloads page content and returns it to the spiders (the downloader is built on Twisted, an efficient asynchronous model)
  • Spiders
    The spiders do the real work: they extract the information you need, the so-called items, from specific pages. You can also extract links from the pages and let Scrapy keep crawling the next page
  • Item Pipeline
    Processes the items extracted by the spiders. Its main jobs are persisting items, validating items, and cleaning out unwanted information. After a page is parsed by a spider, its items are sent to the pipeline and processed in several specific stages
  • Downloader Middlewares
    A layer of hooks between the Scrapy engine and the downloader, mainly handling the requests and responses passed between them
  • Spider Middlewares
    A layer of hooks between the Scrapy engine and the spiders, mainly handling the spiders' response input and request output
  • Scheduler Middlewares
    Middleware between the Scrapy engine and the scheduler, handling the requests and responses sent from the engine to the scheduler

Scrapy's run flow is roughly as follows:

  1. The engine takes a URL from the scheduler for the next page to crawl
  2. The engine wraps the URL in a Request and hands it to the downloader
  3. The downloader fetches the resource and wraps it in a Response
  4. The spider parses the Response
  5. If items are parsed out, they are handed to the item pipeline for further processing
  6. If links (URLs) are parsed out, the URLs are handed back to the scheduler to wait for crawling (a minimal sketch of steps 5 and 6 follows)
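
As a rough illustration of steps 5 and 6 (this sketch is not from the original post; the spider name and URL are hypothetical), a parse callback can yield both items and new Requests. Scrapy routes the items to the item pipeline and feeds the Requests back to the scheduler:

import scrapy


class FlowSketchSpider(scrapy.Spider):
    name = 'flow_sketch'                    # hypothetical spider name
    start_urls = ['http://example.com/']    # hypothetical start URL

    def parse(self, response):
        # Step 5: extracted data is yielded as an item and goes to the pipeline
        yield {'title': response.xpath('//title/text()').extract_first()}
        # Step 6: extracted links are yielded as Requests and go back to the scheduler
        for href in response.xpath('//a/@href').extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse)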

1. Installation

Linux
      pip3 install scrapy


Windows
      a. pip3 install wheel
      b. Download Twisted from http://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted
      c. Change into the download directory and run: pip3 install Twisted‑17.1.0‑cp35‑cp35m‑win_amd64.whl
      d. pip3 install scrapy
      e. Download and install pywin32: https://sourceforge.net/projects/pywin32/files/
      f. Download and install OpenSSL (pyOpenSSL): https://pypi.python.org/pypi/pyOpenSSL#downloads
         Install it the same way as Twisted

 

2. Basic usage

1. Basic commands

1. scrapy startproject <project name>
   - Create a new project in the current directory (similar to Django)
 
2. scrapy genspider [-t template] <name> <domain>
   - Create a spider inside the project
   For example:
      scrapy genspider -t basic oldboy oldboy.com
      scrapy genspider -t xmlfeed autohome autohome.com.cn
   PS:
      List the available templates: scrapy genspider -l
      Show a template's content:   scrapy genspider -d <template name>
 
3. scrapy list
   - List the spiders in the project
 
4. scrapy crawl <spider name>
   - Run a single spider

2. Project structure and spider overview

project_name/
   scrapy.cfg
   project_name/
       __init__.py
       items.py
       pipelines.py
       settings.py
       spiders/
           __init__.py
            spider1.py
            spider2.py
            spider3.py

File descriptions:

  • scrapy.cfg   The project's main configuration file. (The settings that actually control crawling live in settings.py.)
  • items.py     Data templates for structured data, similar to Django's models
  • pipelines    Data-processing behaviour, e.g. persisting the structured data
  • settings.py  Configuration file: recursion depth, concurrency, download delay, and so on
  • spiders      Spider directory: create files here and write the crawling rules

Note: spider files are usually named after the site's domain. When a spider runs, it first requests the site's robots.txt file (an anti-crawling measure); this can be bypassed by setting ROBOTSTXT_OBEY to False in the settings file, as in the sketch below.
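
A minimal settings.py sketch for this (ROBOTSTXT_OBEY is Scrapy's own setting name; the surrounding project layout is assumed):

# settings.py (sketch): ignore robots.txt for this project
ROBOTSTXT_OBEY = False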

import scrapy


class XiaoHuarSpider(scrapy.spiders.Spider):
    name = "xiaohuar"                       # spider name *****
    allowed_domains = ["xiaohuar.com"]      # allowed domains
    start_urls = [
        "http://www.xiaohuar.com/hua/",     # start URLs
    ]

    def parse(self, response):
        # callback invoked once the start URLs have been fetched
        pass
spider1.py

3. A quick first try

import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request


class DigSpider(scrapy.Spider):
    # Name of the spider; the crawl command is started with this name
    name = "dig"

    # Allowed domains
    allowed_domains = ["chouti.com"]

    # Start URLs
    start_urls = [
        'http://dig.chouti.com/',
    ]

    has_request_set = {}

    def parse(self, response):
        print(response.url)

        hxs = HtmlXPathSelector(response)
        page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:
            page_url = 'http://dig.chouti.com%s' % page
            key = self.md5(page_url)
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                obj = Request(url=page_url, method='GET', callback=self.parse)
                yield obj

    @staticmethod
    def md5(val):
        import hashlib
        ha = hashlib.md5()
        ha.update(bytes(val, encoding='utf-8'))
        key = ha.hexdigest()
        return key

To run this spider, open a terminal, change into the project directory, and run the following command:

scrapy crawl dig --nolog

The key points in the code above are:

  • Request is a class that wraps a user request; yielding a Request object in a callback tells Scrapy to keep crawling that URL
  • HtmlXPathSelector structures the HTML and provides selector functionality

4. Selectors

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http import HtmlResponse
html = """<!DOCTYPE html>
<html>
    <head lang="en">
        <meta charset="UTF-8">
        <title></title>
    </head>
    <body>
        <ul>
            <li class="item-"><a id='i1' href="link.html">first item</a></li>
            <li class="item-0"><a id='i2' href="llink.html">first item</a></li>
            <li class="item-1"><a href="llink2.html">second item<span>vv</span></a></li>
        </ul>
        <div><a href="llink2.html">second item</a></div>
    </body>
</html>
"""
response = HtmlResponse(url='http://example.com', body=html,encoding='utf-8')
# hxs = HtmlXPathSelector(response)
# print(hxs)
# hxs = Selector(response=response).xpath('//a')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[2]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@href="link.html"][@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[contains(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[starts-with(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/text()').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('/html/body/ul/li/a/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//body/ul/li/a/@href').extract_first()
# print(hxs)
 
# ul_list = Selector(response=response).xpath('//body/ul/li')
# for item in ul_list:
#     v = item.xpath('./a/span')
#     # or
#     # v = item.xpath('a/span')
#     # or
#     # v = item.xpath('*/a/span')
#     print(v)
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request
from scrapy.http.cookies import CookieJar
from scrapy import FormRequest


class ChouTiSpider(scrapy.Spider):
    # Name of the spider; the crawl command is started with this name
    name = "chouti"
    # Allowed domains
    allowed_domains = ["chouti.com"]

    cookie_dict = {}
    has_request_set = {}

    def start_requests(self):
        url = 'http://dig.chouti.com/'
        # return [Request(url=url, callback=self.login)]
        yield Request(url=url, callback=self.login)

    def login(self, response):
        cookie_jar = CookieJar()
        cookie_jar.extract_cookies(response, response.request)
        for k, v in cookie_jar._cookies.items():
            for i, j in v.items():
                for m, n in j.items():
                    self.cookie_dict[m] = n.value

        req = Request(
            url='http://dig.chouti.com/login',
            method='POST',
            headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
            body='phone=8615131255089&password=pppppppp&oneMonth=1',
            cookies=self.cookie_dict,
            callback=self.check_login
        )
        yield req

    def check_login(self, response):
        req = Request(
            url='http://dig.chouti.com/',
            method='GET',
            callback=self.show,
            cookies=self.cookie_dict,
            dont_filter=True
        )
        yield req

    def show(self, response):
        # print(response)
        hxs = HtmlXPathSelector(response)
        news_list = hxs.select('//div[@id="content-list"]/div[@class="item"]')
        for new in news_list:
            # temp = new.xpath('div/div[@class="part2"]/@share-linkid').extract()
            link_id = new.xpath('*/div[@class="part2"]/@share-linkid').extract_first()
            yield Request(
                url='http://dig.chouti.com/link/vote?linksId=%s' % (link_id,),
                method='POST',
                cookies=self.cookie_dict,
                callback=self.do_favor
            )

        page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:

            page_url = 'http://dig.chouti.com%s' % page
            import hashlib
            hash = hashlib.md5()
            hash.update(bytes(page_url, encoding='utf-8'))
            key = hash.hexdigest()
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                yield Request(
                    url=page_url,
                    method='GET',
                    callback=self.show
                )

    def do_favor(self, response):
        print(response.text)
Example: log in to Chouti automatically and upvote posts

Note: set DEPTH_LIMIT = 1 in settings.py to limit how many levels deep the "recursion" goes.

5. Structured processing

The example above only needs simple handling, so everything is done directly in the parse method. When you want to do more with the extracted data, you can use Scrapy items to structure it and then hand it over to pipelines for unified processing.

import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request
from scrapy.http.cookies import CookieJar
from scrapy import FormRequest


class XiaoHuarSpider(scrapy.Spider):
    # Name of the spider; the crawl command is started with this name
    name = "xiaohuar"
    # Allowed domains
    allowed_domains = ["xiaohuar.com"]

    start_urls = [
        "http://www.xiaohuar.com/list-1-1.html",
    ]
    # custom_settings = {
    #     'ITEM_PIPELINES':{
    #         'spider1.pipelines.JsonPipeline': 100
    #     }
    # }
    has_request_set = {}

    def parse(self, response):
        # Analyze the page
        # Find the content on the page that matches the rules (the photos) and save it
        # Find all <a> tags, then follow them level by level

        hxs = HtmlXPathSelector(response)

        items = hxs.select('//div[@class="item_list infinite_scroll"]/div')
        for item in items:
            src = item.select('.//div[@class="img"]/a/img/@src').extract_first()
            name = item.select('.//div[@class="img"]/span/text()').extract_first()
            school = item.select('.//div[@class="img"]/div[@class="btns"]/a/text()').extract_first()
            url = "http://www.xiaohuar.com%s" % src
            from ..items import XiaoHuarItem
            obj = XiaoHuarItem(name=name, school=school, url=url)
            yield obj

        urls = hxs.select('//a[re:test(@href, "http://www.xiaohuar.com/list-1-\d+.html")]/@href')
        for url in urls:
            key = self.md5(url)
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = url
                req = Request(url=url, method='GET', callback=self.parse)
                yield req

    @staticmethod
    def md5(val):
        import hashlib
        ha = hashlib.md5()
        ha.update(bytes(val, encoding='utf-8'))
        key = ha.hexdigest()
        return key
spiders/xiaohuar.py
import scrapy


class XiaoHuarItem(scrapy.Item):
    name = scrapy.Field()
    school = scrapy.Field()
    url = scrapy.Field()
items
import json
import os
import requests


class JsonPipeline(object):
    def __init__(self):
        self.file = open('xiaohua.txt', 'w')

    def process_item(self, item, spider):
        v = json.dumps(dict(item), ensure_ascii=False)
        self.file.write(v)
        self.file.write('\n')
        self.file.flush()
        return item


class FilePipeline(object):
    def __init__(self):
        if not os.path.exists('imgs'):
            os.makedirs('imgs')

    def process_item(self, item, spider):
        response = requests.get(item['url'], stream=True)
        file_name = '%s_%s.jpg' % (item['name'], item['school'])
        with open(os.path.join('imgs', file_name), mode='wb') as f:
            f.write(response.content)
        return item
pipelines
ITEM_PIPELINES = {
   'spider1.pipelines.JsonPipeline': 100,
   'spider1.pipelines.FilePipeline': 300,
}
# The integer after each pipeline determines the order in which they run: items pass through
# the pipelines from the lowest number to the highest. These numbers are conventionally
# defined in the 0-1000 range.
settings
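
As a side note (this sketch is not from the original post), Scrapy pipelines may also implement the standard open_spider/close_spider hooks, which avoids leaving the file handle from the JsonPipeline above open forever. A minimal sketch:

import json


class JsonPipeline(object):
    # Sketch only: same behaviour as above, but the output file is opened and
    # closed together with the spider via Scrapy's pipeline lifecycle hooks.
    def open_spider(self, spider):
        self.file = open('xiaohua.txt', 'w')

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()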

6. Middleware

class CustomSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        print('process_spider_input', len(response.text))
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        print('process_spider_output', len(response.text))
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        print('process_spider_exception')
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        print('process_start_requests')
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class CustomDownloaderMiddleware(object):
    def process_request(self, request, spider):
        return None

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        return None
middlewares.py
# settings.py
 
DOWNLOADER_MIDDLEWARES = {
    'spider1.middlewares.CustomDownloaderMiddleware': 543,
}
SPIDER_MIDDLEWARES = {
    'spider1.middlewares.CustomSpiderMiddleware': 543,
}

7. Custom commands

  • Create a directory (any name will do, e.g. commands) at the same level as spiders
  • Create a crawlall.py file inside it (the file name becomes the name of the custom command)
from scrapy.commands import ScrapyCommand
from scrapy.utils.project import get_project_settings


class Command(ScrapyCommand):

    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def run(self, args, opts):
        spider_list = self.crawler_process.spiders.list()
        for name in spider_list:
            self.crawler_process.crawl(name, **opts.__dict__)
        self.crawler_process.start()
crawlall.py
  • Add COMMANDS_MODULE = '<project name>.<directory name>' to settings.py
  • Run the command from the project directory: scrapy crawlall

8. Supplementary concepts

Thread: the smallest unit of scheduling on a computer. Threads suit I/O-bound programs, but they are still not ideal: if every thread just waits on I/O requests, threads are wasted, and coroutines are a better fit.

Process: a process has a main thread by default, can contain multiple threads, and those threads share the process's resources. CPU-bound programs are better served by processes.

Coroutine: using a single thread to complete multiple tasks; also called a micro-thread.

GIL: Python's global interpreter lock. It effectively puts a lock on all threads in a process, guaranteeing that only one thread is scheduled on the CPU at any given time.
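
A small sketch (not from the original post) that makes the thread/process distinction concrete: on a CPU-bound task the thread pool gains little because of the GIL, while the process pool can use multiple cores. The pool size and workload below are arbitrary choices for illustration.

import time
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor


def cpu_bound(n):
    # Pure computation; the GIL is held for almost the entire loop
    total = 0
    for i in range(n):
        total += i * i
    return total


def timed(pool_cls, label):
    start = time.time()
    with pool_cls(4) as pool:
        list(pool.map(cpu_bound, [5000000] * 4))
    print(label, round(time.time() - start, 2), 'seconds')


if __name__ == '__main__':
    timed(ThreadPoolExecutor, 'threads:  ')
    timed(ProcessPoolExecutor, 'processes:')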

For more documentation, see: http://scrapy-chs.readthedocs.io/zh_CN/latest/index.html
