The engine is the core of Scrapy: it controls and schedules the entire crawl. The engine's open_spider method performs some initialization and opens the scheduler, which sets up the seed (request) queue and the dedup queue. Finally, it schedules self._next_request to kick off a crawl cycle.
```python
@defer.inlineCallbacks
def open_spider(self, spider, start_requests=(), close_if_idle=True):
    assert self.has_capacity(), "No free spider slot when opening %r" % \
        spider.name
    logger.info("Spider opened", extra={'spider': spider})
    nextcall = CallLaterOnce(self._next_request, spider)  # wrap _next_request so it can be scheduled on the reactor
    scheduler = self.scheduler_cls.from_crawler(self.crawler)  # get the scheduler from the crawler
    start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)
    slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
    self.slot = slot
    self.spider = spider
    yield scheduler.open(spider)  # open the scheduler (request queues and dupe filter)
    yield self.scraper.open_spider(spider)
    self.crawler.stats.open_spider(spider)
    yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
    slot.nextcall.schedule()  # schedule the first call to self._next_request
    slot.heartbeat.start(5)
```
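The nextcall above wraps _next_request in a CallLaterOnce helper, which coalesces any number of schedule() calls made in the same reactor iteration into a single delayed call. Here is a minimal sketch of how such a helper works (paraphrased from scrapy.utils.reactor; details may differ across versions):

```python
from twisted.internet import reactor

class CallLaterOnce:
    """Schedule a function to run on the next reactor iteration,
    but at most once, no matter how often schedule() is invoked."""

    def __init__(self, func, *a, **kw):
        self._func = func
        self._a = a
        self._kw = kw
        self._call = None  # pending IDelayedCall, if any

    def schedule(self, delay=0):
        if self._call is None:  # only one pending call at a time
            self._call = reactor.callLater(delay, self)

    def cancel(self):
        if self._call:
            self._call.cancel()

    def __call__(self):
        self._call = None  # allow the next schedule() to fire again
        return self._func(*self._a, **self._kw)
```

This is why slot.nextcall.schedule() can be called freely from many places (the heartbeat, download completion, crawl) without flooding the reactor with duplicate _next_request calls.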
In open_spider, the slot schedules a call to _next_request, so let's look at _next_request next. It first uses _needs_backout(spider) to decide whether the crawl loop should back out and return; it then uses self._next_request_from_scheduler(spider) to check whether there are still URLs left to crawl.
```python
def _next_request(self, spider):
    slot = self.slot
    if not slot:
        return
    if self.paused:
        return
    while not self._needs_backout(spider):  # should we back out of the crawl loop?
        if not self._next_request_from_scheduler(spider):  # any more URLs to crawl?
            break
    if slot.start_requests and not self._needs_backout(spider):
        try:
            request = next(slot.start_requests)
        except StopIteration:
            slot.start_requests = None
        except Exception:
            slot.start_requests = None
            logger.error('Error while obtaining start requests',
                         exc_info=True, extra={'spider': spider})
        else:
            self.crawl(request, spider)
    if self.spider_is_idle(spider) and slot.close_if_idle:
        self._spider_idle(spider)
```
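_needs_backout is what throttles this loop: the engine backs out when it is no longer running, when the slot is closing, or when the downloader or scraper is saturated. Roughly (paraphrased from the engine source of the same era; exact attribute names may vary by version):

```python
def _needs_backout(self, spider):
    slot = self.slot
    return (
        not self.running                      # engine has been stopped
        or slot.closing                       # spider is being closed
        or self.downloader.needs_backout()    # too many active downloads
        or self.scraper.slot.needs_backout()  # scraper buffer is full
    )
```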
_next_request then loops over _next_request_from_scheduler(self, spider), which fetches the next request to crawl from the scheduler and sends it to the downloader to fetch the page.
```python
def _next_request_from_scheduler(self, spider):
    slot = self.slot
    request = slot.scheduler.next_request()  # pop the next request to crawl from the queue
    if not request:
        return
    d = self._download(request, spider)  # download the request
    d.addBoth(self._handle_downloader_output, request, spider)  # hand the downloaded response onward
    d.addErrback(lambda f: logger.info('Error while handling downloader output',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    d.addBoth(lambda _: slot.remove_request(request))
    d.addErrback(lambda f: logger.info('Error while removing request from slot',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    d.addBoth(lambda _: slot.nextcall.schedule())
    d.addErrback(lambda f: logger.info('Error while scheduling new request',
                                       exc_info=failure_to_exc_info(f),
                                       extra={'spider': spider}))
    return d
```
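The alternating addBoth/addErrback pattern above is worth pausing on: each addBoth step runs whether the previous step succeeded or failed, and the addErrback right after it fires only if that particular step raised. A self-contained toy chain (hypothetical names, just to illustrate Twisted's callback ordering):

```python
from twisted.internet import defer

def show_deferred_chain():
    d = defer.Deferred()

    def handle_output(result):
        print('handle_output got:', result)
        raise RuntimeError('boom')  # simulate a failing step

    d.addBoth(handle_output)                            # runs on success or failure
    d.addErrback(lambda f: print('logged:', f.value))   # traps only handle_output's error
    d.addBoth(lambda _: print('cleanup runs regardless'))
    d.callback('response')  # fire the chain with a success value

show_deferred_chain()
# handle_output got: response
# logged: boom
# cleanup runs regardless
```

In _next_request_from_scheduler the same structure guarantees that slot.remove_request and nextcall.schedule still run even if handling the downloader output fails, so one bad response cannot stall the crawl loop.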
Next, let's look at how downloading is driven, via the engine's _download method and the downloader's fetch.
```python
def _download(self, request, spider):
    slot = self.slot
    slot.add_request(request)

    def _on_success(response):
        assert isinstance(response, (Response, Request))
        if isinstance(response, Response):  # if a Response came back, log it and fire the signal
            response.request = request  # tie request to response received
            logkws = self.logformatter.crawled(request, response, spider)
            logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
            self.signals.send_catch_log(signal=signals.response_received,
                                        response=response, request=request,
                                        spider=spider)
        return response

    def _on_complete(_):
        slot.nextcall.schedule()
        return _

    dwld = self.downloader.fetch(request, spider)  # download the request with the downloader's fetch
    dwld.addCallbacks(_on_success)  # attach the success callback
    dwld.addBoth(_on_complete)
    return dwld
```
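_download registers the request in the slot with slot.add_request, and _next_request_from_scheduler removes it once the callbacks are done; the slot's inprogress set is how the engine knows whether work is still in flight. A rough sketch of the Slot helper used above (paraphrased from the engine module; treat the _maybe_fire_closing details as an assumption about how closing is finalized):

```python
from twisted.internet import defer, task

class Slot(object):
    def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
        self.closing = False
        self.inprogress = set()  # requests currently being downloaded/processed
        self.start_requests = iter(start_requests)
        self.close_if_idle = close_if_idle
        self.nextcall = nextcall
        self.scheduler = scheduler
        self.heartbeat = task.LoopingCall(nextcall.schedule)  # the 5s heartbeat from open_spider

    def add_request(self, request):
        self.inprogress.add(request)

    def remove_request(self, request):
        self.inprogress.remove(request)
        self._maybe_fire_closing()  # if closing and nothing in flight, finish the close

    def close(self):
        self.closing = defer.Deferred()
        self._maybe_fire_closing()
        return self.closing

    def _maybe_fire_closing(self):
        if self.closing and not self.inprogress:
            if self.nextcall:
                self.nextcall.cancel()
                if self.heartbeat.running:
                    self.heartbeat.stop()
            self.closing.callback(None)
```

Because every downloaded request passes through add_request/remove_request, the engine can tell at any moment whether anything is still in flight; this is the state that spider_is_idle and the close_if_idle check in _next_request rely on.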