Using scrapy_redis

URL deduplication

Defining the dedup rule (called and applied by the scheduler)
 
    a. Internally, the following settings are used to connect to Redis
 
        # REDIS_HOST = 'localhost'                            # host name
        # REDIS_PORT = 6379                                   # port
        # REDIS_URL = 'redis://user:pass@hostname:9001'       # connection URL (takes precedence over the settings above)
        # REDIS_PARAMS  = {}                                  # Redis connection parameters; default: REDIS_PARAMS = {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
        # REDIS_PARAMS['redis_cls'] = 'myproject.RedisClient' # Python class used for the Redis connection; default: redis.StrictRedis
        # REDIS_ENCODING = "utf-8"                            # redis encoding; default: 'utf-8'
     
    b. Deduplication is done through a Redis set; the key of the set is:
     
        key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
        Default setting:
            DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'
              
    c. The dedup rule turns each URL into a unique fingerprint and then checks whether that fingerprint already exists in the Redis set (a combined sketch follows below)
     
        from scrapy.utils import request
        from scrapy.http import Request
         
        req = Request(url='http://www.cnblogs.com/wupeiqi.html')
        result = request.request_fingerprint(req)
        print(result) # 8ea4fd67887449313ccc12e5b6b92510cc53675c
         
         
        PS:
            - When the URL query parameters appear in a different order, the fingerprint is the same;
            - Request headers are not part of the calculation by default; include_headers can specify which headers to include
            Example:
                from scrapy.utils import request
                from scrapy.http import Request
                 
                req = Request(url='http://www.baidu.com?name=8&id=1',callback=lambda x:print(x),cookies={'k1':'vvvvv'})
                result = request.request_fingerprint(req,include_headers=['cookies',])
                 
                print(result)
                 
                req = Request(url='http://www.baidu.com?id=1&name=8',callback=lambda x:print(x),cookies={'k1':666})
                 
                result = request.request_fingerprint(req,include_headers=['cookies',])
                 
                print(result)
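Putting b and c together, here is a minimal sketch of the check the filter performs; the localhost Redis connection is an assumption for illustration, and the key is built by hand from the default DUPEFILTER_KEY pattern rather than by the scheduler itself:

import time

import redis
from scrapy.http import Request
from scrapy.utils import request

conn = redis.StrictRedis(host='localhost', port=6379)

# Mirrors the default DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'
key = 'dupefilter:%(timestamp)s' % {'timestamp': int(time.time())}

req = Request(url='http://www.cnblogs.com/wupeiqi.html')
fp = request.request_fingerprint(req)

# sadd returns 1 when the fingerprint is new and 0 when it is already in the set,
# which is exactly the "have we seen this request?" check the dedup filter performs
if conn.sadd(key, fp):
    print('new request, schedule it')
else:
    print('duplicate request, drop it')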
         
"""
# Ensure all spiders share same duplicates filter through redis.
# DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
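Note that when RFPDupeFilter is enabled this way, its key is built from DUPEFILTER_KEY with a timestamp, so every run gets a fresh set. A hedged sketch of one way to share a fixed key across runs (the module, class name, and key below are made up for illustration):

# dupefilters.py (hypothetical module)
from scrapy_redis.connection import get_redis_from_settings
from scrapy_redis.dupefilter import RFPDupeFilter


class SharedRFPDupeFilter(RFPDupeFilter):
    @classmethod
    def from_settings(cls, settings):
        server = get_redis_from_settings(settings)
        # One fixed key instead of 'dupefilter:<timestamp>', shared by every run
        return cls(server=server, key='dupefilter:shared',
                   debug=settings.getbool('DUPEFILTER_DEBUG'))

# settings.py
# DUPEFILTER_CLASS = 'myproject.dupefilters.SharedRFPDupeFilter'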

 

 

REDIS_HOST = '192.168.16.86'                        # host name
REDIS_PORT = 6379                                   # port
# REDIS_URL = 'redis://user:pass@hostname:9001'       # connection URL (takes precedence over the settings above)
# REDIS_PARAMS  = {}                                  # Redis connection parameters; default: REDIS_PARAMS = {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
# REDIS_PARAMS['redis_cls'] = 'redis.StrictRedis'     # Python class used for the Redis connection; default: redis.StrictRedis
REDIS_ENCODING = "utf-8"                            # redis encoding; default: 'utf-8'
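The lines above only describe the connection; scrapy_redis opens it at runtime. A quick sanity check is to open the same connection yourself with redis-py (the host and port are this example's values and the other parameters are the documented defaults; adjust for your environment):

import redis

# Same parameters as REDIS_HOST / REDIS_PORT and the default REDIS_PARAMS above
conn = redis.StrictRedis(host='192.168.16.86', port=6379,
                         socket_timeout=30, socket_connect_timeout=30,
                         retry_on_timeout=True, encoding='utf-8')
print(conn.ping())  # True means the settings should work for scrapy_redis as well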


SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'          # PriorityQueue (sorted set) is the default; alternatives: FifoQueue (list) and LifoQueue (list)
"""
每個爬蟲,都有本身scrapy-redis中的隊列,在redis中對應的一個key
renjian:requests: ['http://www.baidu.com','http://www.baidu.com','http://www.baidu.com','http://www.baidu.com','http://www.baidu.com','http://www.baidu.com',]
jianren:requests: ['http://www.daboa.com','http://www.daboa.com','http://www.daboa.com','http://www.daboa.com',]
"""
SCHEDULER_QUEUE_KEY = '%(spider)s:requests'                         # redis key under which the scheduler stores pending requests
SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"                  # serializer for the data saved to redis (a module exposing loads/dumps); pickle by default

SCHEDULER_PERSIST = True                                             # keep the scheduler queue and dedup records when the spider closes; True = keep, False = clear
SCHEDULER_FLUSH_ON_START = False                                     # clear the scheduler queue and dedup records before starting; True = clear, False = keep

SCHEDULER_IDLE_BEFORE_CLOSE = 10                                    # maximum time to wait when fetching from the scheduler and it is empty (if there is still no data at the end, nothing is fetched)

SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'                  # redis key under which the dedup fingerprints are stored
"""
renjian:dupefilter:{}
jianren:dupefilter:{}

"""
SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'# class that implements the dedup rule
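With the key patterns above, the scheduler's state can be inspected directly from redis-py. A small sketch, assuming a spider named 'xiaobai' and the default PriorityQueue (which stores requests in a sorted set):

import redis

conn = redis.StrictRedis(host='192.168.16.86', port=6379)

# Pending requests: '%(spider)s:requests' is a sorted set for PriorityQueue
# (FifoQueue/LifoQueue would use a plain list, so llen would apply instead)
print(conn.zcard('xiaobai:requests'))

# Seen requests: '%(spider)s:dupefilter' is a set of request fingerprints
print(conn.scard('xiaobai:dupefilter'))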


# Use the scrapy_redis scheduler
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Use scrapy_redis for deduplication
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Item persistence
# When the spider yields an Item object, RedisPipeline is executed
# and the item is persisted to redis with the key and serializer configured below,
# stored in a redis list
# PIPELINES
# ITEM_PIPELINES = {
#    'scrapy_redis.pipelines.RedisPipeline': 300,
# }
# REDIS_ITEMS_KEY = '%(spider)s:items'
# REDIS_ITEMS_SERIALIZER = 'json.dumps'
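If the RedisPipeline lines above are enabled, each yielded Item is serialized (JSON by default) and pushed onto the '%(spider)s:items' list, so any other process can drain it. A minimal reader sketch, assuming the spider name 'xiaobai' and the Redis host from this example:

import json

import redis

conn = redis.StrictRedis(host='192.168.16.86', port=6379)

# Each entry in 'xiaobai:items' is one JSON-encoded item
for raw in conn.lrange('xiaobai:items', 0, -1):
    item = json.loads(raw)
    print(item.get('url'), item.get('content'))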

# Start URLs
# Fetch start URLs from a set or from a list? True = set, False = list
# When writing the spider, the start URLs are read from this redis key
REDIS_START_URLS_AS_SET = False
REDIS_START_URLS_KEY = '%(name)s:start_urls'
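With REDIS_START_URLS_AS_SET = False the spider pops start URLs from a list (see the lpush example at the end of this article); if it is set to True, push them into a set instead, for example:

import redis

conn = redis.Redis(host='192.168.16.86', port=6379)
# The key holds a set when REDIS_START_URLS_AS_SET = True, so use sadd instead of lpush
conn.sadd('xiaobai:start_urls', 'http://www.chouti.com')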

Example: a scrapy_redis spider

from scrapy.http import Request

from ..items import XiaobaiItem

from scrapy_redis.spiders import RedisSpider
class RenjianSpider(RedisSpider):
    name = 'xiaobai'
    allowed_domains = ['chouti.com']

    def parse(self, response):

        news_list = response.xpath('//*[@id="content-list"]/div[@class="item"]')

        for news in news_list:

            content = news.xpath('.//div[@class="part1"]/a/text()').extract_first().strip()
            url = news.xpath('.//div[@class="part1"]/a/@href').extract_first()

            yield XiaobaiItem(url=url,content=content)

        yield Request(url='http://dig.chouti.com/',callback=self.parse)
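The spider yields XiaobaiItem(url=..., content=...), so items.py needs matching fields. A minimal sketch (the fields are inferred from the spider above, not taken from the original project):

# items.py
import scrapy


class XiaobaiItem(scrapy.Item):
    url = scrapy.Field()
    content = scrapy.Field()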
Push a start URL into redis to kick off the crawl:

import redis

conn = redis.Redis(host='192.168.16.56', port=6379)
conn.lpush('xiaobai:start_urls', 'http://www.chouti.com')
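Start the spider with: scrapy crawl xiaobai. Unlike a regular spider, a RedisSpider has no hard-coded start_urls; it waits until a URL shows up under its redis key ('xiaobai:start_urls' with the settings above), so pushing one as shown starts the crawl.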