scrapy_redis
1. scrapy startproject chouti
2. cd chouti
3. scrapy genspider -t crawl chouti www.baidu.com
settings.py:
```python
LOG_LEVEL = 'ERROR'       # log level
ROBOTSTXT_OBEY = False    # do not obey the robots protocol
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'  # UA spoofing
```
CrawlSpider example (spiders/qiubai.py):

```python
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class ChoutiSpider(CrawlSpider):
    # name = 'chouti'
    # # allowed_domains = ['www.xxx.com']
    # start_urls = ['https://dig.chouti.com/r/scoff/hot/1']
    #
    # # Link extractor:
    # # allow: the rule (a regex) by which the link extractor extracts links
    # link = LinkExtractor(allow=r'/r/scoff/hot/\d+')
    #
    # rules = (
    #     # Rule parser: parses the page behind each extracted link in the specified way
    #     Rule(link, callback='parse_item', follow=True),
    #     # follow=True keeps applying the link extractor to the pages behind the extracted links
    # )
    #
    # def parse_item(self, response):
    #     print(response)

    name = 'qiubai'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.qiushibaike.com/pic/']

    # Link extractor:
    # allow: the rule (a regex) by which links are extracted, e.g. /pic/page/3?s=5172496
    link = LinkExtractor(allow=r'/pic/page/\d+\?s=\d+')
    link1 = LinkExtractor(allow=r'/pic/$')
    # link1 = LinkExtractor(allow=r'')

    rules = (
        # Rule parser: parses the page behind each extracted link in the specified way
        Rule(link, callback='parse_item', follow=True),
        # follow=True keeps applying the link extractor to the pages behind the extracted links
        Rule(link1, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        print(response)
```

Incremental crawling, deduplicating by URL with a Redis set (increment1_Pro):

```python
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from increment1_Pro.items import Increment1ProItem


class MovieSpider(CrawlSpider):
    name = 'movie'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/index.php/vod/show/id/7.html']

    rules = (
        Rule(LinkExtractor(allow=r'/index.php/vod/show/id/7/page/\d+\.html'),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        conn = Redis(host='127.0.0.1', port=6379)
        # Prefix each relative detail URL with the site root
        detail_url_list = [
            'https://www.4567tv.tv' + url
            for url in response.xpath('//li[@class="col-md-6 col-sm-4 col-xs-3"]/div/a/@href').extract()
        ]
        for url in detail_url_list:
            # ex == 1: the url was not yet in the set, i.e. unseen data
            ex = conn.sadd('movies_url', url)
            if ex == 1:
                yield scrapy.Request(url=url, callback=self.parse_detail)
            else:
                print('The site has no updates; no new data to crawl!')

    def parse_detail(self, response):
        item = Increment1ProItem()
        item['name'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/h1/text()').extract_first()
        item['actor'] = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[3]/a/text()').extract_first()
        yield item
```

Incremental crawling, deduplicating by a data fingerprint (increment2_Pro):

```python
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from increment2_Pro.items import Increment2ProItem
from redis import Redis
import hashlib


class QiubaiSpider(CrawlSpider):
    name = 'qiubai'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.qiushibaike.com/text/']

    rules = (
        Rule(LinkExtractor(allow=r'/text/page/\d+/'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        div_list = response.xpath('//div[@class="article block untagged mb15 typs_hot"]')
        conn = Redis(host='127.0.0.1', port=6379)
        for div in div_list:
            item = Increment2ProItem()
            item['content'] = div.xpath('.//div[@class="content"]/span//text()').extract()
            item['content'] = ''.join(item['content'])
            item['author'] = div.xpath('./div/a[2]/h2/text() | ./div[1]/span[2]/h2/text()').extract_first()
            source = item['author'] + item['content']
            # A self-defined data fingerprint: hash of author + content
            hashValue = hashlib.sha256(source.encode()).hexdigest()
            # ex == 1: the fingerprint was not yet in the set, i.e. unseen data
            ex = conn.sadd('qiubai_hash', hashValue)
            if ex == 1:
                yield item
            else:
                print('No new data to crawl!!!')
```
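Both incremental spiders import their item classes from an items.py that is not shown here. A minimal sketch of what those classes would look like, with the field names taken from the spider code above and everything else assumed:

```python
# items.py (assumed; only the fields the spiders above actually use)
import scrapy


class Increment1ProItem(scrapy.Item):
    name = scrapy.Field()   # movie title, filled in MovieSpider.parse_detail
    actor = scrapy.Field()  # lead actor, filled in MovieSpider.parse_detail


class Increment2ProItem(scrapy.Item):
    content = scrapy.Field()  # joke text, filled in QiubaiSpider.parse_item
    author = scrapy.Field()   # author name, filled in QiubaiSpider.parse_item
```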
1. Passing parameters between requests (item):
   - Use case: the data to parse is not all on the same page.
   - Request(callback, meta={}) — see the first sketch after this list.
2. LOG_LEVEL, LOG_FILE (log level and log file settings).
3. Downloader middleware:
   - Batch-intercept requests (to swap in proxy IPs and UAs) and responses (to process page data); see the middleware sketch after this list.
4. How to use selenium in scrapy (sketched after this list):
   1. Instantiate a browser object in the spider's __init__ method.
   2. Close the browser object in the spider's closed method.
   3. Receive the spider's browser object in the downloader middleware's process_response method.
   4. Perform the automation there (issue the request, fetch the page data).
   5. Instantiate a new response object (from scrapy.http import HtmlResponse) and store the page data in it.
   6. Return the new response object.
   7. Enable the middleware in the settings file.
5. How to improve scrapy's crawling efficiency (collected into a settings fragment after this list):
   - Increase concurrency: scrapy defaults to 16 concurrent requests, which can be raised; setting CONCURRENT_REQUESTS = 100 in the settings file allows 100 concurrent requests.
   - Lower the log level: running scrapy produces a lot of log output; to cut CPU usage, restrict logging to INFO or ERROR. In the settings file: LOG_LEVEL = 'INFO'.
   - Disable cookies: if cookies are not genuinely needed, disabling them while crawling reduces CPU usage and speeds up the crawl. In the settings file: COOKIES_ENABLED = False.
   - Disable retries: re-requesting (retrying) failed HTTP requests slows the crawl, so retries can be turned off. In the settings file: RETRY_ENABLED = False.
   - Reduce the download timeout: when crawling very slow links, a shorter timeout lets stuck requests be abandoned quickly, improving efficiency. In the settings file: DOWNLOAD_TIMEOUT = 10 (a 10 s timeout).
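A minimal sketch of passing an item between callbacks through meta; the spider name, URLs, XPaths and the DetailItem class are all placeholders, not from the original project:

```python
import scrapy


class DetailItem(scrapy.Item):
    # hypothetical item: one field comes from each of two pages
    name = scrapy.Field()
    detail = scrapy.Field()


class MetaDemoSpider(scrapy.Spider):
    name = 'meta_demo'
    start_urls = ['https://www.example.com/list']  # placeholder URL

    def parse(self, response):
        for li in response.xpath('//li[@class="row"]'):  # placeholder XPath
            item = DetailItem()
            item['name'] = li.xpath('./a/text()').extract_first()
            detail_url = response.urljoin(li.xpath('./a/@href').extract_first())
            # Request(callback, meta={}): attach the half-filled item to the request
            yield scrapy.Request(url=detail_url, callback=self.parse_detail,
                                 meta={'item': item})

    def parse_detail(self, response):
        # Retrieve the item carried over from the previous callback
        item = response.meta['item']
        item['detail'] = response.xpath('//div[@class="detail"]//text()').extract_first()
        yield item
```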
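A sketch of a downloader middleware that batch-intercepts requests to rotate the UA and proxy IP; the pools below are placeholders, and the class still has to be registered under DOWNLOADER_MIDDLEWARES in the settings file:

```python
# middlewares.py (sketch)
import random


class RandomUAProxyMiddleware(object):
    # Placeholder pools; a real project would use larger lists or a proxy service
    user_agent_list = [
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15',
    ]
    proxy_list = ['http://127.0.0.1:8888', 'http://127.0.0.1:8889']  # placeholder proxies

    def process_request(self, request, spider):
        # Intercept every outgoing request: spoof the UA and route it through a proxy
        request.headers['User-Agent'] = random.choice(self.user_agent_list)
        request.meta['proxy'] = random.choice(self.proxy_list)
        return None  # let downloading continue normally

    def process_response(self, request, response, spider):
        # Responses can be intercepted and reworked here if needed
        return response
```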
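The seven selenium steps, sketched end to end. The spider name, URL and chromedriver path are assumptions; HtmlResponse is the real scrapy class named in step 5:

```python
# spiders/news.py (sketch)
import scrapy
from selenium import webdriver


class NewsSpider(scrapy.Spider):
    name = 'news'
    start_urls = ['https://www.example.com/news']  # placeholder URL

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Step 1: instantiate a browser object in the spider's __init__
        self.bro = webdriver.Chrome(executable_path='./chromedriver')  # assumed driver path

    def parse(self, response):
        print(response.text)  # this response was rendered by the browser

    def closed(self, spider):
        # Step 2: close the browser object when the spider closes
        self.bro.quit()


# middlewares.py (sketch)
from scrapy.http import HtmlResponse


class SeleniumMiddleware(object):
    def process_response(self, request, response, spider):
        # Step 3: receive the browser object created in the spider
        bro = spider.bro
        # Step 4: perform the automation (issue the request, get the page data)
        bro.get(request.url)
        page_text = bro.page_source
        # Steps 5 and 6: build a new HtmlResponse holding the rendered page and return it
        return HtmlResponse(url=bro.current_url, body=page_text,
                            encoding='utf-8', request=request)
```

Step 7 happens in settings.py, e.g. DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.SeleniumMiddleware': 543} (the module path here is a placeholder).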
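The five efficiency tweaks collected into one settings.py fragment (all of these are real scrapy settings):

```python
# settings.py
CONCURRENT_REQUESTS = 100  # raise concurrency (scrapy's default is 16)
LOG_LEVEL = 'INFO'         # less log output, less CPU spent on logging
COOKIES_ENABLED = False    # skip cookie handling when cookies are not needed
RETRY_ENABLED = False      # do not retry failed requests
DOWNLOAD_TIMEOUT = 10      # give up on slow links after 10 s
```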