import scrapy


class MiddleSpider(scrapy.Spider):
    name = 'middle'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.baidu.com/s?wd=ip']

    def parse(self, response):
        page_text = response.text
        # Just save the page to a file to check the effect; no database here
        with open('./ip.html', 'w', encoding='utf-8') as f:
            f.write(page_text)
from scrapy import signals
import random


class MiddleproDownloaderMiddleware(object):

    # User-Agent pool that covers the major browsers
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    PROXY_http = [
        '153.180.102.104:80',
        '195.208.131.189:56055',
    ]
    PROXY_https = [
        '120.83.49.90:9000',
        '95.189.112.214:35508',
    ]

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    # Intercepts every request that has not raised an exception
    # `spider` is the spider instance that issued the request
    def process_request(self, request, spider):
        print('this is process_request()')
        # Spoof the User-Agent with a random pick from the pool
        request.headers['User-Agent'] = random.choice(self.user_agent_list)

        # Test whether the proxy setting actually takes effect
        request.meta['proxy'] = 'https://218.60.8.83:3129'
        return None

    # Intercepts every response
    def process_response(self, request, response, spider):
        return response

    # Intercepts requests that raised an exception
    def process_exception(self, request, exception, spider):
        if request.url.split(':')[0] == 'https':
            request.meta['proxy'] = 'https://' + random.choice(self.PROXY_https)
        else:
            request.meta['proxy'] = 'http://' + random.choice(self.PROXY_http)
        # Re-schedule the failed request so it is retried with the new proxy
        return request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
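The middleware only runs after it is registered in settings.py. A minimal sketch follows; the package name middlePro is an assumption (use your own project's package), and ROBOTSTXT_OBEY / LOG_LEVEL are just conveniences for this demo. With it enabled, `scrapy crawl middle` should write ip.html through the chosen UA and proxy.

# settings.py -- "middlePro" is an assumed package name; adjust to your project
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'

DOWNLOADER_MIDDLEWARES = {
    'middlePro.middlewares.MiddleproDownloaderMiddleware': 543,
}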
# -*- coding: utf-8 -*-
import scrapy
from ..items import MiddleWareItem
from selenium import webdriver


class NewsSpider(scrapy.Spider):
    # Use selenium inside scrapy to fetch dynamically loaded data
    browser = webdriver.Chrome(r'D:\spider\chromedriver.exe')
    name = 'news'
    sort_urls = []  # holds the urls of the two news sections
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/']

    def newsContentParse(self, response):
        item = response.meta['item']
        # Parse the article body and store it straight into the item
        content_list = response.xpath('//div[@id="endText"]//text()').extract()
        item['news_content'] = ''.join(content_list)
        yield item

    # Parse the news entries on one section page
    def parse_detail(self, response):
        div_list = response.xpath('//div[@class="ndi_main"]/div')
        for div in div_list:
            news_title = div.xpath('.//h3/a/text()').extract_first()
            detail_news_url = div.xpath('.//h3/a/@href').extract_first()
            item = MiddleWareItem()
            item['news_title'] = news_title
            # Request the article page and pass the item along to the next callback
            yield scrapy.Request(url=detail_news_url, callback=self.newsContentParse,
                                 meta={'item': item})

    def parse(self, response):
        # Extract the urls of the two sections; both pages share the same layout,
        # so one parsing callback can handle them
        li_list = response.xpath('//div[@class="ns_area list"]/ul/li')
        # Indexes of the <li> tags for the domestic and international news sections
        indexs = [3, 4]
        sort_li_list = []  # the two selected <li> elements
        for index in indexs:
            li = li_list[index]
            sort_li_list.append(li)
        # Extract the url of each section
        for li in sort_li_list:
            sort_url = li.xpath('./a/@href').extract_first()
            self.sort_urls.append(sort_url)
            # Request each section url to get the detail page source
            yield scrapy.Request(url=sort_url, callback=self.parse_detail)
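The spider above never shuts down the shared Chrome instance. A minimal sketch of a teardown hook, assuming it is fine to quit the browser once the crawl ends; closed() is Scrapy's standard per-spider cleanup callback and is not part of the original code:

    # Add to NewsSpider: Scrapy calls closed() when the spider finishes,
    # which is a convenient place to release the shared WebDriver.
    def closed(self, reason):
        self.browser.quit()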
from scrapy import signals
from time import sleep
from scrapy.http import HtmlResponse


class MiddleWareDownloaderMiddleware(object):

    def process_request(self, request, spider):
        return None

    # This method intercepts every response object
    # (the requirement is to process only certain responses)
    def process_response(self, request, response, spider):
        """
        1. Pick out the responses that need special handling
        2. A response can be located through its request
        3. A request can be located through its url
        4. The urls to match against are stored in spider.sort_urls
        """
        sort_urls = spider.sort_urls
        browser = spider.browser
        if request.url in sort_urls:
            """
            1. The url identifies the request
            2. The request identifies the response (which does not meet the requirement)
            3. Manually build two new responses that do meet the requirement
            4. Replace the original responses with the new ones
            """
            # Let the browser request the section url
            browser.get(request.url)
            sleep(2)  # give the page time to load so the effect is visible
            # Scroll to the bottom so the lazily loaded news entries are rendered
            js = 'window.scrollTo(0,document.body.scrollHeight)'
            browser.execute_script(js)
            sleep(2)
            # The page source now contains the dynamically loaded news data
            page_text = browser.page_source
            # Manually build a new response object and wrap page_text in it
            # as the response body
            return HtmlResponse(url=browser.current_url, body=page_text,
                                encoding='utf-8', request=request)
        return response
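As with the first example, this middleware has to be registered before process_response is ever called. A minimal sketch, assuming the second project's package is named middleWare (suggested by the item class name, but not confirmed by the source):

# settings.py of the news project -- "middleWare" is an assumed package name
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'

DOWNLOADER_MIDDLEWARES = {
    'middleWare.middlewares.MiddleWareDownloaderMiddleware': 543,
}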
import scrapy


class MiddleWareItem(scrapy.Item):
    news_title = scrapy.Field()
    news_content = scrapy.Field()
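The original post stops at the item definition, so the yielded items are never persisted. For completeness, a minimal pipeline sketch that writes each item to a local file; the class name MiddleWarePipeline, the file name news.txt, and the ITEM_PIPELINES path are all assumptions, not part of the source:

# pipelines.py -- a sketch; enable it with
# ITEM_PIPELINES = {'middleWare.pipelines.MiddleWarePipeline': 300} in settings.py
class MiddleWarePipeline(object):
    def open_spider(self, spider):
        # one file handle for the whole crawl
        self.fp = open('./news.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        self.fp.write((item['news_title'] or '') + '\n' + (item['news_content'] or '') + '\n\n')
        return item

    def close_spider(self, spider):
        self.fp.close()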