Spider: (no start_urls is configured here; the start URLs are read directly from redis, and there can be more than one)
from scrapy_redis.spiders import RedisSpider

# class ChoutiSpider(scrapy.Spider):
class ChoutiSpider(RedisSpider):
    # With name set here, RedisSpider uses it to build the redis key that
    # holds the start URLs (there can be several). The key format is:
    #   self.redis_key = self.redis_key % {'name': self.name}
    name = 'baidu'
    allowed_domains = ['baidu.com']

    def parse(self, response):
        print('running parse')
        print(response)
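A quick worked example of that key derivation, using the default template (this is just the substitution RedisSpider performs, shown standalone):

# Default template: START_URLS_KEY = '%(name)s:start_urls'
redis_key = '%(name)s:start_urls' % {'name': 'baidu'}
print(redis_key)  # -> 'baidu:start_urls'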
Configuration in settings:

# True means the key is a set; False means it is a list
REDIS_START_URLS_AS_SET = False  # default is False, i.e. the data is read out in list form
# REDIS_START_URLS_KEY = '%(name)s:start_urls'  # this is the default if unset; it is the key the start URLs are stored under in redis and the one the values are looked up by, e.g. baidu:start_urls

If it is a list, data is fetched with lpop(key), where key is the one above.
If it is a set, data is fetched with spop(key), e.g. spop('baidu:start_urls') pops one of the stored start URLs at random; the key can hold several, and the spider keeps popping until its batch is full.
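A minimal sketch of the difference between the two fetch operations (assuming redis is running locally on the default port and the key has been seeded as shown in the next section):

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)

# REDIS_START_URLS_AS_SET = False: the key is a list, consumed with lpop
print(conn.lpop('baidu:start_urls'))

# REDIS_START_URLS_AS_SET = True: the key is a set, consumed with spop
print(conn.spop('baidu:start_urls'))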
Seeding start URLs into redis:
Push the start URLs from a separate .py file.

As a list:

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
conn.lpush('baidu:start_urls', 'http://www.baidu.com')

If REDIS_START_URLS_AS_SET = False in settings, the key is a list, so it is seeded with lpush, rpush, etc.; if it is True, the key is a set, so it is seeded with sadd, etc.

As a set:

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
conn.sadd('baidu:start_urls', 'http://www.baidu.com')  # store the data under this key format
print(conn.smembers('baidu:start_urls'))
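Since the key can hold several start URLs, both lpush and sadd accept multiple values in a single call; a small sketch (the second URL is just a placeholder):

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)

# Seed several start URLs at once (list form; sadd works the same way)
conn.lpush('baidu:start_urls',
           'http://www.baidu.com',
           'http://news.baidu.com')
print(conn.lrange('baidu:start_urls', 0, -1))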
Analysis of the spider source in scrapy_redis:
class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    redis_key = None
    redis_batch_size = None
    redis_encoding = None

    # Redis client placeholder.
    server = None

    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.

        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return

        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)

        if crawler is None:
            raise ValueError("crawler is required")

        settings = crawler.settings

        # Read the start-URL key from the settings; the default is
        # START_URLS_KEY = '%(name)s:start_urls'. You can set
        # REDIS_START_URLS_KEY in settings to your own format, but you must
        # then seed redis under that same format, because this key is what
        # all the start URLs are looked up by.
        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
            )

        # The spider's name is interpolated into the key here; if the key
        # exists in redis, its values become the start URLs. So you can push
        # start URLs (several, if you like) into this key yourself -- the
        # format is fixed: START_URLS_KEY = '%(name)s:start_urls'.
        self.redis_key = self.redis_key % {'name': self.name}

        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")

        if self.redis_batch_size is None:
            # TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
            # Read from the settings; getint converts the value to int.
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE',
                settings.getint('CONCURRENT_REQUESTS'),
            )

        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")

        if self.redis_encoding is None:
            self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)

        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
                         self.__dict__)

        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Returns a request to be scheduled or none."""
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
        # If REDIS_START_URLS_AS_SET=True the key is a set (spop);
        # if False it is a list (lpop).
        fetch_one = self.server.spop if use_set else self.server.lpop
        # XXX: Do we need to use a timeout here?
        found = 0
        # TODO: Use redis pipeline execution.
        # Loop until the batch is full, pulling start URLs out of redis_key
        # (format: REDIS_START_URLS_KEY = '%(name)s:start_urls'); every value
        # found under the key becomes a start request.
        while found < self.redis_batch_size:
            data = fetch_one(self.redis_key)  # spop or lpop
            if not data:
                # Queue empty.
                break
            req = self.make_request_from_data(data)
            if req:
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    def make_request_from_data(self, data):
        """Returns a Request instance from data coming from Redis.

        By default, ``data`` is an encoded URL. You can override this method to
        provide your own message decoding.

        Parameters
        ----------
        data : bytes
            Message from redis.

        """
        url = bytes_to_str(data, self.redis_encoding)
        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        # TODO: While there is capacity, schedule a batch of redis requests.
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider
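As make_request_from_data's docstring says, it can be overridden to provide your own message decoding. A minimal sketch, assuming the values pushed into the redis key are JSON objects like {"url": ..., "meta": ...} instead of bare URLs (the spider class and message fields here are hypothetical):

import json

from scrapy import Request
from scrapy_redis.spiders import RedisSpider


class JsonChoutiSpider(RedisSpider):
    name = 'baidu'
    allowed_domains = ['baidu.com']

    def make_request_from_data(self, data):
        # data arrives as bytes; decode with the configured redis encoding.
        message = json.loads(data.decode(self.redis_encoding))
        # dont_filter=True matches the default behaviour for start requests.
        return Request(url=message['url'],
                       meta=message.get('meta', {}),
                       dont_filter=True)

    def parse(self, response):
        print(response)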