Scrapy Middleware
1. Installation:
   pip install wheel
   pip install twisted
   pip install pywin32
   pip install scrapy
2. Create a project:
   scrapy startproject ProName
   cd ProName
   scrapy genspider projectName www.xxx.com
3. Directory structure:
   - spiders: holds the spider files
   - items.py: one xxx = scrapy.Field() per attribute to store
   - pipelines.py:
     - process_item()
     - open_spider()
     - close_spider()
   - settings.py: the configuration file
4. The spider file:
   - name: the unique identifier of the spider
   - start_urls: the URLs requested at startup
   - parse(self, response):
     - xpath() returns a list of Selector objects; call .extract() to get the data out
   - Persistence:
     - Via a terminal command (can only persist the return value of parse to a local file):
       scrapy crawl xxx -o ./xxx.csv
     - Via a pipeline (see the sketch after this outline):
       1. Parse the data.
       2. Wrap the parsed data in an item object.
       3. Submit the item object to the pipeline with yield.
       4. Perform the I/O in the pipeline's process_item() method to persist the data.
       5. Enable the pipeline in settings.py.
       Note: the return value of process_item() hands the item on to the next pipeline class.
   - Sending requests manually:
     - yield scrapy.Request(url, callback)
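A minimal sketch of that pipeline workflow, assuming a hypothetical item with a single name field, a hypothetical project named ProName, and persistence to a local text file:

# items.py
import scrapy

class ProNameItem(scrapy.Item):
    name = scrapy.Field()  # hypothetical field; define one Field() per attribute to persist


# pipelines.py
class ProNamePipeline(object):
    fp = None

    def open_spider(self, spider):
        # called once when the spider starts: open the file here
        self.fp = open('./data.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # perform the I/O for each item submitted via yield
        self.fp.write(item['name'] + '\n')
        # returning the item hands it to the next pipeline class (if any)
        return item

    def close_spider(self, spider):
        # called once when the spider closes: release the file handle
        self.fp.close()


# settings.py -- enable the pipeline (lower number = higher priority)
ITEM_PIPELINES = {
    'ProName.pipelines.ProNamePipeline': 300,
}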
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

import random

from scrapy import signals


class MiddleproSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class MiddleproDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    # Optional fallback proxy IPs
    PROXY_http = [
        '153.180.102.104:80',
        '195.208.131.189:56055',
    ]
    PROXY_https = [
        '120.83.49.90:9000',
        '95.189.112.214:35508',
    ]

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    # Intercepts every request that has not raised an exception
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called

        # Disguise the request with a User-Agent drawn from the UA pool
        print('this is process_request')
        request.headers['User-Agent'] = random.choice(self.user_agent_list)
        print('User-Agent', request.headers['User-Agent'])
        # # Use the proxy pool to set a proxy IP for the request
        # if request.url.split(':')[0] == 'http':
        #     request.meta['proxy'] = random.choice(self.PROXY_http)
        # else:
        #     request.meta['proxy'] = random.choice(self.PROXY_https)
        return None

    # Intercepts every response
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    # Intercepts requests that raised an exception
    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        print('this is process_exception')
        # Switch to a proxy that matches the request's scheme
        if request.url.split(':')[0] == 'http':
            request.meta['proxy'] = random.choice(self.PROXY_http)
        else:
            request.meta['proxy'] = random.choice(self.PROXY_https)
        # Return the request so it is rescheduled through the new proxy
        return request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
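For this UA/proxy pool middleware to take effect, it has to be registered in settings.py. A sketch, assuming the project module is named middlepro (inferred from the class names above):

# settings.py
DOWNLOADER_MIDDLEWARES = {
    'middlepro.middlewares.MiddleproDownloaderMiddleware': 543,
}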
movie_pro.py
# -*- coding: utf-8 -*-
import scrapy
from movie.items import MovieItem


class MoveProSpider(scrapy.Spider):
    name = 'movie_pro'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.4567tv.tv/frim/index1.html']

    # Parse the data on the detail page
    def parse_detail(self, response):
        # response.meta returns the meta dict received with the request
        item = response.meta['item']
        actor = response.xpath('/html/body/div[1]/div/div/div/div[2]/p[3]/a/text()').extract_first()
        item['actor'] = actor
        yield item

    def parse(self, response):
        li_list = response.xpath('//li[@class="col-md-6 col-sm-4 col-xs-3"]')
        for li in li_list:
            item = MovieItem()
            name = li.xpath('./div/a/@title').extract_first()
            detail_url = 'https://www.4567tv.tv' + li.xpath('./div/a/@href').extract_first()
            item['name'] = name
            # The meta argument forwards data along with the request:
            # the meta dict is handed to the callback's response parameter
            yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': item})
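The spider above imports MovieItem and fills its name and actor fields, so the matching items.py would look roughly like this (the field names are taken from the spider; the rest is the standard item template):

# items.py
import scrapy

class MovieItem(scrapy.Item):
    # the two attributes the spider assigns before yielding
    name = scrapy.Field()
    actor = scrapy.Field()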
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from scrapy.http import HtmlResponse
from time import sleep


class WangyiproSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class WangyiproDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest

        # Fetch the dynamically loaded data with the spider's browser
        bro = spider.bro
        bro.get(url=request.url)
        sleep(3)
        # page_text now contains the dynamically loaded news data
        page_text = bro.page_source
        sleep(3)
        # Wrap the rendered page source in a new response object
        return HtmlResponse(url=spider.bro.current_url, body=page_text, encoding='utf-8', request=request)

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
wangyi.py
# -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver

"""
Workflow for using selenium inside scrapy:
1. Create a browser object in the spider's constructor (as an attribute of the spider).
2. Override the spider's closed(self, spider) method and quit the browser there.
3. In the downloader middleware's process_response method, get the browser object through the spider argument.
4. In process_response, write the browser-automation code (to get the dynamically loaded page source).
5. Instantiate a new response object and wrap the page_source data into it.
6. Return the new response object.
"""


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.example.com']
    start_urls = ['http://war.163.com/']

    def __init__(self):
        self.bro = webdriver.Chrome(executable_path=r'C:\Users\Administrator\Desktop\爬蟲+數據\day_03_爬蟲\chromedriver.exe')

    def parse(self, response):
        div_list = response.xpath('//div[@class="data_row news_article clearfix "]')
        for div in div_list:
            title = div.xpath('.//div[@class="news_title"]/h3/a/text()').extract_first()
            print(title)

    def closed(self, spider):
        print('Closing the browser object!')
        self.bro.quit()
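For the workflow in the docstring to run, WangyiproDownloaderMiddleware from the previous file must be enabled so every response is routed through its process_response. A sketch, assuming the project module is named wangyiPro (inferred from the class names; adjust to the actual project name):

# settings.py
DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}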