Scrapy framework: assorted notes

 

Persistence

Steps

pipeline/items
	a. First write the pipeline class
		class XXXPipeline(object):
			def process_item(self, item, spider):
				return item
				
	b. Write the Item class
		class XdbItem(scrapy.Item):
			href = scrapy.Field()
			title = scrapy.Field()
					
	c. Configure it in settings
		ITEM_PIPELINES = {
		   'xdb.pipelines.XdbPipeline': 300,
		}
	
	d. In the spider: every time an Item is yielded, process_item is called once (see the sketch below).

		yield an Item object
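
A minimal sketch of the whole flow, assuming the XdbItem above lives in the xdb project package (xdb/items.py); the spider name and XPath selectors are purely illustrative:

import scrapy
from xdb.items import XdbItem   # assumed project layout, matching the config above

class XdbSpider(scrapy.Spider):
	name = 'xdb'
	start_urls = ['https://dig.chouti.com/']

	def parse(self, response):
		for a in response.xpath('//a[@href]'):
			# every yielded Item is handed to the pipeline's process_item exactly once
			yield XdbItem(
				title=a.xpath('./text()').extract_first(),
				href=a.xpath('./@href').extract_first(),
			)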

Writing the pipeline

from scrapy.exceptions import DropItem

class FilePipeline(object):

	def __init__(self,path):
		self.f = None
		self.path = path

	@classmethod
	def from_crawler(cls, crawler):
		"""
		Called at initialization time to create the pipeline object.
		:param crawler:
		:return:
		"""
		print('File.from_crawler')
		path = crawler.settings.get('HREF_FILE_PATH')
		return cls(path)

	def open_spider(self,spider):
		"""
		Called when the spider starts running.
		:param spider:
		:return:
		"""
		print('File.open_spider')
		self.f = open(self.path,'a+')

	def process_item(self, item, spider):
		# f = open('xx.log','a+')
		# f.write(item['href']+'\n')
		# f.close()
		print('File',item['href'])
		self.f.write(item['href']+'\n')
		
		# return item  	# hand the item to the next pipeline's process_item
		raise DropItem()  # process_item of any later pipeline will no longer run

	def close_spider(self,spider):
		"""
		Called when the spider is closed.
		:param spider:
		:return:
		"""
		print('File.close_spider')
		self.f.close()
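
For from_crawler above to find the path, settings.py needs the corresponding keys; the file name and the pipeline's module path below are assumptions about the project layout:

# settings.py (example values)
HREF_FILE_PATH = 'news_href.log'          # read by FilePipeline.from_crawler
ITEM_PIPELINES = {
	'xdb.pipelines.FilePipeline': 300,    # assumed module path for the class above
}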

Note: pipelines are shared by all spiders. If you want per-spider behavior, use the spider argument and handle it yourself (see the sketch below).
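
For example, a sketch of a pipeline that only persists items produced by the 'chouti' spider and passes everything else through untouched (the class name and file path are made up):

class ChoutiOnlyFilePipeline(object):

	def open_spider(self, spider):
		self.f = open('chouti_href.log', 'a+')

	def process_item(self, item, spider):
		if spider.name == 'chouti':       # only handle items from the chouti spider
			self.f.write(item['href'] + '\n')
		return item                       # always pass the item on to later pipelines

	def close_spider(self, spider):
		self.f.close()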

Deduplication rules

Writing the filter class

from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint

class XdbDupeFilter(BaseDupeFilter):

	def __init__(self):
		self.visited_fd = set()

	@classmethod
	def from_settings(cls, settings):
		return cls()

	def request_seen(self, request):
		fd = request_fingerprint(request=request)
		if fd in self.visited_fd:
			return True  # seen before: the request gets filtered out
		self.visited_fd.add(fd)
		return False

	def open(self):  # can return a deferred
		print('start')

	def close(self, reason):  # can return a deferred
		print('end')

	# def log(self, request, spider):  # log that a request has been filtered
	#     print('log')

Configuration

# Override the default dedup filter
# DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'
DUPEFILTER_CLASS = 'xdb.dupefilters.XdbDupeFilter'

Using it in a spider

import scrapy
from scrapy.http import Request

class ChoutiSpider(scrapy.Spider):
	name = 'chouti'
	allowed_domains = ['chouti.com']
	start_urls = ['https://dig.chouti.com/']

	def parse(self, response):
		print(response.request.url)
		# item_list = response.xpath('//div[@id="content-list"]/div[@class="item"]')
		# for item in item_list:
		#     text = item.xpath('.//a/text()').extract_first()
		#     href = item.xpath('.//a/@href').extract_first()

		page_list = response.xpath('//div[@id="dig_lcpage"]//a/@href').extract()
		for page in page_list:
			page = "https://dig.chouti.com" + page
			# yield Request(url=page,callback=self.parse,dont_filter=False) # https://dig.chouti.com/all/hot/recent/2
			yield Request(url=page,callback=self.parse,dont_filter=True) # https://dig.chouti.com/all/hot/recent/2 

Note:
- implement the correct logic in request_seen
- set dont_filter=False so the dedup filter is actually consulted

Depth

In the settings file:
	# limit crawl depth
	DEPTH_LIMIT = 3

cookie

Method 1:
	- Carrying (attach the body and cookies to the request):
		Request(
			url='https://dig.chouti.com/login',
			method='POST',
			body="phone=8613121758648&password=woshiniba&oneMonth=1",# # body=urlencode({})"phone=8615131255555&password=12sdf32sdf&oneMonth=1"
			cookies=self.cookie_dict,
			headers={
				'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
			},
			callback=self.check_login
		)
	
	- Parsing (collect the cookies returned by the response):
			from scrapy.http.cookies import CookieJar

			cookie_dict = {}
			cookie_jar = CookieJar()
			cookie_jar.extract_cookies(response, response.request)

			# walk the CookieJar object and copy the cookies into a plain dict
			for k, v in cookie_jar._cookies.items():
				for i, j in v.items():
					for m, n in j.items():
						cookie_dict[m] = n.value
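
Putting the two pieces together, a sketch of a login spider whose check_login callback stores the cookies on the spider and carries them on the next request; the spider name, the follow-up URL, and the final callback are only examples:

import scrapy
from scrapy.http import Request
from scrapy.http.cookies import CookieJar

class LoginSpider(scrapy.Spider):           # hypothetical name
	name = 'chouti_login'
	start_urls = ['https://dig.chouti.com/']
	cookie_dict = {}

	def parse(self, response):
		# send the login POST from method 1 above
		yield Request(
			url='https://dig.chouti.com/login',
			method='POST',
			body="phone=8613121758648&password=woshiniba&oneMonth=1",
			headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
			callback=self.check_login,
		)

	def check_login(self, response):
		# collect the cookies set by the login response into self.cookie_dict
		cookie_jar = CookieJar()
		cookie_jar.extract_cookies(response, response.request)
		for k, v in cookie_jar._cookies.items():
			for i, j in v.items():
				for m, n in j.items():
					self.cookie_dict[m] = n.value
		# carry the collected cookies on the next request
		yield Request(
			url='https://dig.chouti.com/all/hot/recent/1',   # example follow-up URL
			cookies=self.cookie_dict,
			callback=self.index,
		)

	def index(self, response):
		print('logged-in page length:', len(response.text))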

start_url

How it works internally

How the Scrapy engine pulls the start URLs from the spider (a rough sketch follows below):
	1. Call start_requests and take its return value
	2. v = iter(return value)
	3.
		req1 = v.__next__()
		req2 = v.__next__()
		req3 = v.__next__()
		...
	4. Put all the requests into the scheduler
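
A rough, self-contained illustration of the four steps above; this is not the engine's actual code, just the idea:

import scrapy
from scrapy.http import Request

class DemoSpider(scrapy.Spider):      # hypothetical spider, mirrors the pattern below
	name = 'demo'
	start_urls = ['https://dig.chouti.com/']

	def start_requests(self):
		for url in self.start_urls:
			yield Request(url=url)

spider = DemoSpider()
v = iter(spider.start_requests())     # steps 1-2: call start_requests, wrap it in an iterator
req1 = v.__next__()                   # step 3: pull the requests one at a time
print(req1.url)                       # step 4: the engine would enqueue each one into the scheduler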

Writing it

import scrapy
from scrapy.http import Request

class ChoutiSpider(scrapy.Spider):
	name = 'chouti'
	allowed_domains = ['chouti.com']
	start_urls = ['https://dig.chouti.com/']
	cookie_dict = {}
	
	def start_requests(self):
		# Method 1: yield the requests one by one (generator)
		for url in self.start_urls:
			yield Request(url=url)
		# Method 2: return a list of requests
		# req_list = []
		# for url in self.start_urls:
		#     req_list.append(Request(url=url))
		# return req_list 

Customization: the start URLs can be fetched from Redis instead (a sketch follows below).
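
A sketch of that customization, assuming a local Redis instance holding the start URLs in a list; the redis client, key name, and connection details are all assumptions:

import redis                     # assumption: the redis-py client is installed
import scrapy
from scrapy.http import Request

class ChoutiSpider(scrapy.Spider):
	name = 'chouti'

	def start_requests(self):
		conn = redis.Redis(host='127.0.0.1', port=6379)
		# pull every start URL that was pushed into the Redis list
		for url in conn.lrange('xdb:start_urls', 0, -1):
			yield Request(url=url.decode('utf-8'), callback=self.parse)

	def parse(self, response):
		print(response.request.url)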

Depth and priority

Depth

- starts at 0
- each time a new request is yielded, its depth is the originating request's depth + 1
Setting: DEPTH_LIMIT caps how deep the crawl goes

Priority

- a request's download priority is lowered by the depth: priority -= depth * DEPTH_PRIORITY
Setting: DEPTH_PRIORITY (see the example below)
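
A concrete reading of the two settings; the values are only examples, not Scrapy's defaults:

# settings.py
DEPTH_LIMIT = 3        # requests deeper than 3 are not scheduled at all
DEPTH_PRIORITY = 1     # a request at depth 2 ends up with priority 0 - 2 * 1 = -2,
                       # so shallower requests tend to be downloaded first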

Download middleware

Setting a proxy in Scrapy

Built-in

When the spider starts, just set the proxy in os.environ ahead of time.
	import scrapy
	from scrapy.http import Request

	class ChoutiSpider(scrapy.Spider):
		name = 'chouti'
		allowed_domains = ['chouti.com']
		start_urls = ['https://dig.chouti.com/']
		cookie_dict = {}

		def start_requests(self):
			import os
			os.environ['HTTPS_PROXY'] = "http://root:woshiniba@192.168.11.11:9999/"
			os.environ['HTTP_PROXY'] = '19.11.2.32'
			for url in self.start_urls:
				yield Request(url=url,callback=self.parse)

meta (you must attach the proxy yourself on every request)

import scrapy
from scrapy.http import Request

class ChoutiSpider(scrapy.Spider):
	name = 'chouti'
	allowed_domains = ['chouti.com']
	start_urls = ['https://dig.chouti.com/']
	cookie_dict = {}

	def start_requests(self):
		for url in self.start_urls:
			yield Request(url=url, callback=self.parse, meta={'proxy': 'http://root:woshiniba@192.168.11.11:9999/'})

Custom proxy middleware

import base64
import random
from six.moves.urllib.parse import unquote
try:
	from urllib2 import _parse_proxy
except ImportError:
	from urllib.request import _parse_proxy
from six.moves.urllib.parse import urlunparse
from scrapy.utils.python import to_bytes

class XdbProxyMiddleware(object):

	def _basic_auth_header(self, username, password):
		user_pass = to_bytes(
			'%s:%s' % (unquote(username), unquote(password)),
			encoding='latin-1')
		return base64.b64encode(user_pass).strip()

	def process_request(self, request, spider):
		PROXIES = [
			"http://root:woshiniba@192.168.11.11:9999/",
			"http://root:woshiniba@192.168.11.12:9999/",
			"http://root:woshiniba@192.168.11.13:9999/",
			"http://root:woshiniba@192.168.11.14:9999/",
			"http://root:woshiniba@192.168.11.15:9999/",
			"http://root:woshiniba@192.168.11.16:9999/",
		]
		# pick a random proxy for this request
		url = random.choice(PROXIES)

		orig_type = ""
		proxy_type, user, password, hostport = _parse_proxy(url)
		proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))

		if user:
			creds = self._basic_auth_header(user, password)
		else:
			creds = None
		# hand the proxy (and credentials, if any) to the downloader
		request.meta['proxy'] = proxy_url
		if creds:
			request.headers['Proxy-Authorization'] = b'Basic ' + creds
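
The middleware still has to be enabled in settings.py; the module path and priority value below are assumptions for this project layout:

# settings.py
DOWNLOADER_MIDDLEWARES = {
	'xdb.middlewares.XdbProxyMiddleware': 543,
	# optional: disable the built-in proxy middleware so only the custom one is used
	# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
}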