1. A custom asynchronous, non-blocking client
import socket
import select


class Request(object):
    def __init__(self, sock, func, url):
        self.sock = sock
        self.func = func
        self.url = url

    def fileno(self):
        # select() accepts any object that exposes fileno()
        return self.sock.fileno()


def async_request(url_list):
    input_list = []
    conn_list = []

    for url in url_list:
        client = socket.socket()
        client.setblocking(False)
        # Initiate the connection without blocking
        try:
            client.connect((url[0], 80,))
        except BlockingIOError as e:
            pass

        obj = Request(client, url[1], url[0])

        input_list.append(obj)
        conn_list.append(obj)

    while True:
        # Poll the sockets for changes: [request_obj, request_obj, ..., request_obj]
        # Sockets whose connection has completed show up in wlist
        # Sockets that have response data show up in rlist
        rlist, wlist, elist = select.select(input_list, conn_list, [], 0.05)
        for request_obj in wlist:
            # Connection established: send the HTTP request
            request_obj.sock.sendall(
                "GET / HTTP/1.0\r\nHost: {0}\r\n\r\n".format(request_obj.url).encode('utf-8'))
            conn_list.remove(request_obj)

        for request_obj in rlist:
            data = request_obj.sock.recv(8096)
            request_obj.func(data)
            request_obj.sock.close()
            input_list.remove(request_obj)

        if not input_list:
            break
2. Calling the custom module above (and the Twisted equivalent)
import s2


def callback1(data):
    print('Baidu responded:', data)


def callback2(data):
    print('Bing responded:', data)


url_list = [
    ['www.baidu.com', callback1],
    ['www.bing.com', callback2],
]
s2.async_request(url_list)

# ################################# twisted #################################
from twisted.web.client import getPage, defer
from twisted.internet import reactor


def all_done(arg):
    # Stop the event loop once every Deferred has fired
    reactor.stop()


def callback1(contents):
    print(contents)


def callback2(contents):
    print(contents)


deferred_list = []

url_list = [
    ('http://www.bing.com', callback1),
    ('http://www.baidu.com', callback2),
]
for url in url_list:
    deferred = getPage(bytes(url[0], encoding='utf8'))
    deferred.addCallback(url[1])
    deferred_list.append(deferred)

dlist = defer.DeferredList(deferred_list)
dlist.addBoth(all_done)

# Start the Twisted event loop
reactor.run()
2、Scrapy
1. Installation
pip3 install scrapy
2. Basic commands
1. scrapy startproject p1
2. scrapy genspider chouti chouti.com
3. scrapy list
4. scrapy crawl chouti
3. Project file overview
scrapy.cfg     Top-level project configuration (the actual crawler settings live in settings.py)
items.py       Data models for structured data, similar to Django's Model
pipelines.py   Data processing behaviour, e.g. persisting the structured data
settings.py    Settings such as recursion depth, concurrency, download delay, etc.
spiders/       Spider directory: create files here and write the crawling rules
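For reference, the layout produced by the commands above (scrapy startproject p1 followed by scrapy genspider chouti chouti.com) looks roughly like this; the exact files vary slightly between Scrapy versions:

p1/
├── scrapy.cfg              # deployment / entry-point configuration
└── p1/
    ├── __init__.py
    ├── items.py            # item (data model) definitions
    ├── pipelines.py        # item pipelines
    ├── settings.py         # project settings
    └── spiders/
        ├── __init__.py
        └── chouti.py       # spider created by scrapy genspider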
4. A first spider
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request


class DigSpider(scrapy.Spider):
    # Spider name; used to launch it from the command line
    name = "dig"

    # Allowed domains
    allowed_domains = ["chouti.com"]

    # Start URLs
    start_urls = [
        'http://dig.chouti.com/',
    ]

    has_request_set = {}

    def parse(self, response):
        print(response.url)

        hxs = HtmlXPathSelector(response)
        page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:
            page_url = 'http://dig.chouti.com%s' % page
            key = self.md5(page_url)
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                obj = Request(url=page_url, method='GET', callback=self.parse)
                yield obj

    @staticmethod
    def md5(val):
        import hashlib
        ha = hashlib.md5()
        ha.update(bytes(val, encoding='utf-8'))
        key = ha.hexdigest()
        return key
Run: scrapy crawl dig --nolog
5. Selectors
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http import HtmlResponse

html = """<!DOCTYPE html>
<html>
<head lang="en">
    <meta charset="UTF-8">
    <title></title>
</head>
<body>
    <ul>
        <li class="item-"><a id='i1' href="link.html">first item</a></li>
        <li class="item-0"><a id='i2' href="llink.html">first item</a></li>
        <li class="item-1"><a href="llink2.html">second item<span>vv</span></a></li>
    </ul>
    <div><a href="llink2.html">second item</a></div>
</body>
</html>
"""
response = HtmlResponse(url='http://example.com', body=html, encoding='utf-8')
# hxs = HtmlXPathSelector(response)
# print(hxs)
# hxs = Selector(response=response).xpath('//a')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[2]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[@href="link.html"][@id="i1"]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[contains(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[starts-with(@href, "link")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]')
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/text()').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('/html/body/ul/li/a/@href').extract()
# print(hxs)
# hxs = Selector(response=response).xpath('//body/ul/li/a/@href').extract_first()
# print(hxs)

# ul_list = Selector(response=response).xpath('//body/ul/li')
# for item in ul_list:
#     v = item.xpath('./a/span')
#     # or
#     # v = item.xpath('a/span')
#     # or
#     # v = item.xpath('*/a/span')
#     print(v)
import scrapy
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request
from scrapy.http.cookies import CookieJar
from scrapy import FormRequest


class ChouTiSpider(scrapy.Spider):
    # Spider name; used to launch it from the command line
    name = "chouti"
    # Allowed domains
    allowed_domains = ["chouti.com"]

    cookie_dict = {}
    has_request_set = {}

    def start_requests(self):
        url = 'http://dig.chouti.com/'
        # return [Request(url=url, callback=self.login)]
        yield Request(url=url, callback=self.login)

    def login(self, response):
        # Extract the cookies set by the first response so they can be reused later
        cookie_jar = CookieJar()
        cookie_jar.extract_cookies(response, response.request)
        for k, v in cookie_jar._cookies.items():
            for i, j in v.items():
                for m, n in j.items():
                    self.cookie_dict[m] = n.value

        req = Request(
            url='http://dig.chouti.com/login',
            method='POST',
            headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
            body='phone=8615131255089&password=pppppppp&oneMonth=1',
            cookies=self.cookie_dict,
            callback=self.check_login
        )
        yield req

    def check_login(self, response):
        req = Request(
            url='http://dig.chouti.com/',
            method='GET',
            callback=self.show,
            cookies=self.cookie_dict,
            dont_filter=True
        )
        yield req

    def show(self, response):
        # print(response)
        hxs = HtmlXPathSelector(response)
        news_list = hxs.select('//div[@id="content-list"]/div[@class="item"]')
        for new in news_list:
            # temp = new.xpath('div/div[@class="part2"]/@share-linkid').extract()
            link_id = new.xpath('*/div[@class="part2"]/@share-linkid').extract_first()
            # Up-vote every item on the page
            yield Request(
                url='http://dig.chouti.com/link/vote?linksId=%s' % (link_id,),
                method='POST',
                cookies=self.cookie_dict,
                callback=self.do_favor
            )

        page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
        for page in page_list:
            page_url = 'http://dig.chouti.com%s' % page
            import hashlib
            hash = hashlib.md5()
            hash.update(bytes(page_url, encoding='utf-8'))
            key = hash.hexdigest()
            if key in self.has_request_set:
                pass
            else:
                self.has_request_set[key] = page_url
                yield Request(
                    url=page_url,
                    method='GET',
                    callback=self.show
                )

    def do_favor(self, response):
        print(response.text)
6. Structured data processing
import scrapy


class XiaoHuarItem(scrapy.Item):
    name = scrapy.Field()
    school = scrapy.Field()
    url = scrapy.Field()
import json
import os
import requests


class JsonPipeline(object):
    def __init__(self):
        self.file = open('xiaohua.txt', 'w')

    def process_item(self, item, spider):
        v = json.dumps(dict(item), ensure_ascii=False)
        self.file.write(v)
        self.file.write('\n')
        self.file.flush()
        return item


class FilePipeline(object):
    def __init__(self):
        if not os.path.exists('imgs'):
            os.makedirs('imgs')

    def process_item(self, item, spider):
        response = requests.get(item['url'], stream=True)
        file_name = '%s_%s.jpg' % (item['name'], item['school'])
        with open(os.path.join('imgs', file_name), mode='wb') as f:
            f.write(response.content)
        return item
ITEM_PIPELINES = {
    'spider1.pipelines.JsonPipeline': 100,
    'spider1.pipelines.FilePipeline': 300,
}
# The integer assigned to each pipeline determines the order in which items pass through them,
# from the lowest value to the highest. These values are conventionally kept in the 0-1000 range.
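For context, a minimal spider sketch that fills XiaoHuarItem and hands it to the pipelines above could look like the following; the start URL and XPath expressions are illustrative placeholders, not taken from the original project:

import scrapy
from ..items import XiaoHuarItem  # adjust the import to your project layout


class XiaoHuaSpider(scrapy.Spider):
    name = 'xiaohua'
    start_urls = ['http://www.example.com/girls/']  # placeholder start URL

    def parse(self, response):
        for node in response.xpath('//div[@class="item"]'):  # placeholder XPath
            item = XiaoHuarItem()
            item['name'] = node.xpath('.//span[@class="name"]/text()').extract_first()
            item['school'] = node.xpath('.//span[@class="school"]/text()').extract_first()
            item['url'] = node.xpath('.//img/@src').extract_first()
            # Each yielded item travels through ITEM_PIPELINES in priority order
            yield item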
7. Custom pipelines
from scrapy.exceptions import DropItem


class CustomPipeline(object):
    def __init__(self, v):
        self.value = v

    def process_item(self, item, spider):
        # Do the actual work here, e.g. persist the item

        # Returning the item lets the remaining pipelines keep processing it
        return item

        # To discard the item so that later pipelines never see it:
        # raise DropItem()

    @classmethod
    def from_crawler(cls, crawler):
        """
        Called once at start-up to create the pipeline instance
        :param crawler:
        :return:
        """
        val = crawler.settings.getint('MMMM')
        return cls(val)

    def open_spider(self, spider):
        """
        Called when the spider starts
        :param spider:
        :return:
        """
        print('000000')

    def close_spider(self, spider):
        """
        Called when the spider closes
        :param spider:
        :return:
        """
        print('111111')
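A sketch of the matching settings.py entries, assuming the pipeline lives in spider1/pipelines.py; the value of MMMM here is only illustrative:

# settings.py
MMMM = 123  # read by CustomPipeline.from_crawler via crawler.settings.getint('MMMM')

ITEM_PIPELINES = {
    'spider1.pipelines.CustomPipeline': 200,
}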
8. Middlewares
Spider middleware:

class SpiderMiddleware(object):

    def process_spider_input(self, response, spider):
        """
        Called for each downloaded response before it is handed to the spider's parse method
        :param response:
        :param spider:
        :return:
        """
        pass

    def process_spider_output(self, response, result, spider):
        """
        Called with the results returned by the spider
        :param response:
        :param result:
        :param spider:
        :return: must return an iterable of Request or Item objects
        """
        return result

    def process_spider_exception(self, response, exception, spider):
        """
        Called when the spider or process_spider_input raises an exception
        :param response:
        :param exception:
        :param spider:
        :return: None to let subsequent middlewares handle the exception, or an iterable
                 of Response or Item objects to hand to the scheduler or the pipelines
        """
        return None

    def process_start_requests(self, start_requests, spider):
        """
        Called with the start requests when the spider is opened
        :param start_requests:
        :param spider:
        :return: an iterable of Request objects
        """
        return start_requests
Downloader middleware:

class DownMiddleware1(object):
    def process_request(self, request, spider):
        """
        Called for every request before it is downloaded; each downloader middleware's
        process_request is invoked in turn
        :param request:
        :param spider:
        :return:
            None: continue with the remaining middlewares and download the request
            Response object: stop running process_request and start running process_response
            Request object: stop the middleware chain and hand the new request back to the scheduler
            raise IgnoreRequest: stop running process_request and start running process_exception
        """
        pass

    def process_response(self, request, response, spider):
        """
        Called with the response returned by the downloader
        :param request:
        :param response:
        :param spider:
        :return:
            Response object: passed on to the next middleware's process_response
            Request object: stop the middleware chain; the request is rescheduled for download
            raise IgnoreRequest: Request.errback is called
        """
        print('response1')
        return response

    def process_exception(self, request, exception, spider):
        """
        Called when the download handler or a downloader middleware's process_request()
        raises an exception
        :param request:
        :param exception:
        :param spider:
        :return:
            None: let the remaining middlewares handle the exception
            Response object: stop running the remaining process_exception methods
            Request object: stop the middleware chain; the request is rescheduled for download
        """
        return None
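Middlewares only take effect once they are registered in settings.py. A minimal sketch, assuming both classes above live in a module named middlewares inside a project named p1 (adjust the paths to your own project):

# settings.py
SPIDER_MIDDLEWARES = {
    'p1.middlewares.SpiderMiddleware': 543,
}

DOWNLOADER_MIDDLEWARES = {
    'p1.middlewares.DownMiddleware1': 100,
}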
9. Custom commands
A. Create a directory (any name) at the same level as spiders, and create crawlall.py inside it:
from scrapy.commands import ScrapyCommand
from scrapy.utils.project import get_project_settings


class Command(ScrapyCommand):

    requires_project = True

    def syntax(self):
        return '[options]'

    def short_desc(self):
        return 'Runs all of the spiders'

    def run(self, args, opts):
        spider_list = self.crawler_process.spiders.list()
        for name in spider_list:
            self.crawler_process.crawl(name, **opts.__dict__)
        self.crawler_process.start()
B. Register the directory in settings.py: COMMANDS_MODULE = '<project name>.<directory name>' (see the example after step C)
C. Run: scrapy crawlall
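Assuming the directory created in step A is named commands inside the p1 project generated earlier, the setting from step B would look like this:

# settings.py
COMMANDS_MODULE = 'p1.commands'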
10. Avoiding duplicate requests
By default Scrapy deduplicates requests with scrapy.dupefilter.RFPDupeFilter. The relevant settings are:
DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
DUPEFILTER_DEBUG = False
JOBDIR = "path where the record of seen requests is kept, e.g. /root/"  # the final path is /root/requests.seen
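To replace the default filter, a custom class only needs to implement the dupe-filter interface. A minimal sketch, assuming a module step8_king/duplication.py so that it matches the DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl' line shown in the settings section below:

class RepeatUrl:
    def __init__(self):
        # Seen URLs; for distributed crawls this set could live in Redis or a database instead
        self.visited_url = set()

    @classmethod
    def from_settings(cls, settings):
        # Scrapy calls this to build the filter; custom settings can be read here
        return cls()

    def request_seen(self, request):
        # Return True to treat the request as a duplicate and drop it
        if request.url in self.visited_url:
            return True
        self.visited_url.add(request.url)
        return False

    def open(self):
        # Called when the spider starts
        pass

    def close(self, reason):
        # Called when the spider closes
        pass

    def log(self, request, spider):
        # Called each time a duplicate request is filtered out
        pass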
11. Other settings
# -*- coding: utf-8 -*-

# Scrapy settings for step8_king project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

# 1. Bot name
BOT_NAME = 'step8_king'

# 2. Spider module paths
SPIDER_MODULES = ['step8_king.spiders']
NEWSPIDER_MODULE = 'step8_king.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# 3. Client User-Agent request header
# USER_AGENT = 'step8_king (+http://www.yourdomain.com)'

# Obey robots.txt rules
# 4. Whether to obey robots.txt
# ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# 5. Number of concurrent requests
# CONCURRENT_REQUESTS = 4

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 6. Download delay in seconds
# DOWNLOAD_DELAY = 2


# The download delay setting will honor only one of:
# 7. Concurrency per domain; the download delay is also applied per domain
# CONCURRENT_REQUESTS_PER_DOMAIN = 2
# Concurrency per IP; if set, CONCURRENT_REQUESTS_PER_DOMAIN is ignored and
# the download delay is applied per IP instead
# CONCURRENT_REQUESTS_PER_IP = 3

# Disable cookies (enabled by default)
# 8. Whether cookies are enabled (handled internally through a cookiejar)
# COOKIES_ENABLED = True
# COOKIES_DEBUG = True

# Disable Telnet Console (enabled by default)
# 9. The Telnet console can be used to inspect and control the running crawler:
#    connect with `telnet ip port` and issue commands
# TELNETCONSOLE_ENABLED = True
# TELNETCONSOLE_HOST = '127.0.0.1'
# TELNETCONSOLE_PORT = [6023,]


# 10. Default request headers
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }


# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# 11. Item pipelines
# ITEM_PIPELINES = {
#     'step8_king.pipelines.JsonPipeline': 700,
#     'step8_king.pipelines.FilePipeline': 500,
# }


# 12. Custom extensions, hooked in through signals
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     # 'step8_king.extensions.MyExtension': 500,
# }


# 13. Maximum crawl depth; the current depth can be read from request.meta; 0 means unlimited
# DEPTH_LIMIT = 3

# 14. Crawl order: 0 = depth-first (LIFO, the default); 1 = breadth-first (FIFO)

# Last in, first out: depth-first
# DEPTH_PRIORITY = 0
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'

# First in, first out: breadth-first
# DEPTH_PRIORITY = 1
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'

# 15. Scheduler queue
# SCHEDULER = 'scrapy.core.scheduler.Scheduler'
# from scrapy.core.scheduler import Scheduler


# 16. URL deduplication
# DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'


# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html

"""
17. AutoThrottle algorithm
from scrapy.contrib.throttle import AutoThrottle
How the automatic throttling works:
    1. Take the minimum delay: DOWNLOAD_DELAY
    2. Take the maximum delay: AUTOTHROTTLE_MAX_DELAY
    3. Set the initial download delay: AUTOTHROTTLE_START_DELAY
    4. When a request finishes downloading, take its latency, i.e. the time
       between starting the connection and receiving the response headers
    5. Combine it with AUTOTHROTTLE_TARGET_CONCURRENCY:
        target_delay = latency / self.target_concurrency
        new_delay = (slot.delay + target_delay) / 2.0  # slot.delay is the previous delay
        new_delay = max(target_delay, new_delay)
        new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
        slot.delay = new_delay
"""

# Enable AutoThrottle
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0

# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = True

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings


"""
18. HTTP caching
Caches requests/responses that have already been fetched so they can be reused later.

from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
from scrapy.extensions.httpcache import DummyPolicy
from scrapy.extensions.httpcache import FilesystemCacheStorage
"""
# Whether the HTTP cache is enabled
# HTTPCACHE_ENABLED = True

# Cache policy: cache every request; repeated requests are served straight from the cache
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
# Cache policy: respect HTTP response headers such as Cache-Control and Last-Modified
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"

# Cache expiration time in seconds
# HTTPCACHE_EXPIRATION_SECS = 0

# Cache directory
# HTTPCACHE_DIR = 'httpcache'

# HTTP status codes that are never cached
# HTTPCACHE_IGNORE_HTTP_CODES = []

# Cache storage backend
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


"""
19. Proxies, configured through environment variables by default
from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

Option 1: use the built-in middleware, which reads os.environ
    os.environ
    {
        http_proxy: http://root:woshiniba@192.168.11.11:9999/
        https_proxy: http://192.168.11.11:9999/
    }
Option 2: use a custom downloader middleware

    def to_bytes(text, encoding=None, errors='strict'):
        if isinstance(text, bytes):
            return text
        if not isinstance(text, six.string_types):
            raise TypeError('to_bytes must receive a unicode, str or bytes '
                            'object, got %s' % type(text).__name__)
        if encoding is None:
            encoding = 'utf-8'
        return text.encode(encoding, errors)

    class ProxyMiddleware(object):
        def process_request(self, request, spider):
            PROXIES = [
                {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
            ]
            proxy = random.choice(PROXIES)
            if proxy['user_pass'] is not None:
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
                encoded_user_pass = base64.b64encode(to_bytes(proxy['user_pass'])).decode()
                request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
            else:
                print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
                request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

    DOWNLOADER_MIDDLEWARES = {
        'step8_king.middlewares.ProxyMiddleware': 500,
    }
"""

"""
20. HTTPS
There are two situations when crawling HTTPS sites:
    1. The site uses a certificate signed by a trusted CA (supported out of the box)
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"

    2. The site requires a custom (e.g. client) certificate
        DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
        DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

        # https.py
        from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
        from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

        class MySSLFactory(ScrapyClientContextFactory):
            def getCertificateOptions(self):
                from OpenSSL import crypto
                v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                return CertificateOptions(
                    privateKey=v1,   # pKey object
                    certificate=v2,  # X509 object
                    verify=False,
                    method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                )

Related classes:
    scrapy.core.downloader.handlers.http.HttpDownloadHandler
    scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
    scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
Related settings:
    DOWNLOADER_HTTPCLIENTFACTORY
    DOWNLOADER_CLIENTCONTEXTFACTORY
"""


"""
21. Spider middleware
See the SpiderMiddleware example in section 8. The built-in spider middlewares and their priorities:
    'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
    'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
    'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
    'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
    'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
"""
# from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    # 'step8_king.middlewares.SpiderMiddleware': 543,
}


"""
22. Downloader middleware
See the DownMiddleware1 example in section 8. The default downloader middlewares and their priorities:
    {
        'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
        'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
        'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
        'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
        'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
        'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
        'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
        'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
        'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
        'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
        'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
        'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
        'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
        'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
    }
"""
# from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     'step8_king.middlewares.DownMiddleware1': 100,
#     'step8_king.middlewares.DownMiddleware2': 500,
# }