Original link: https://www.fkomm.cn/article/...html
The Scrapy framework provides two Item Pipelines dedicated to downloading files and images: the FilesPipeline and the ImagesPipeline.

This post focuses on the ImagesPipeline!
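For orientation, here is a minimal sketch (not from this project; the storage paths are placeholders) of how the two built-in pipelines are switched on. Each one reads and writes a conventional pair of item fields:

# settings.py, minimal media-pipeline setup (illustrative only)
ITEM_PIPELINES = {
    'scrapy.pipelines.files.FilesPipeline': 1,    # downloads item['file_urls'] into item['files']
    'scrapy.pipelines.images.ImagesPipeline': 1,  # downloads item['image_urls'] into item['images']
}
FILES_STORE = '/path/to/store/files'    # placeholder path
IMAGES_STORE = '/path/to/store/images'  # placeholder path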
This time we are crawling Autohome: car.autohome.com.cn. I have recently taken a liking to the Geely Boyue, so I have been reading up on this car quite a bit.

Let's open the Boyue picture gallery:

https://car.autohome.com.cn/p...
1. Create the Scrapy project and spider:
$ scrapy startproject Geely
$ cd Geely
$ scrapy genspider BoYue car.autohome.com.cn
2. Write items.py:
import scrapy

class GeelyItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()

    # gallery category the image belongs to
    catagory = scrapy.Field()
    # image download URLs (the field name ImagesPipeline expects)
    image_urls = scrapy.Field()
    # filled in by ImagesPipeline with the download results
    images = scrapy.Field()
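A note of my own on that last field: once the ImagesPipeline has run, it fills images with one record per downloaded file. The shape below is standard; the concrete values are made up:

# After the ImagesPipeline has processed an item, item['images'] holds
# one dict per downloaded image (illustrative values):
downloaded = [{
    'url': 'https://car2.autoimg.cn/cardfs/product/autohomecar__sample.jpg',
    'path': 'full/0a79c54f9adf3ef2a81c3b1a4a840503.jpg',  # relative to IMAGES_STORE
    'checksum': 'b9628c4ab9b595f72f280b90c4fd093d',       # MD5 of the file contents
}]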
3. Write the spider:
# -*- coding: utf-8 -*-
import scrapy
# CrawlSpider replaces the usual def parse(self, response) entry point
from scrapy.spiders import CrawlSpider, Rule
# LinkExtractor pulls out the links that the crawl rules will follow
from scrapy.linkextractors import LinkExtractor
from Geely.items import GeelyItem

class BoyueSpider(CrawlSpider):
    name = 'BoYue'
    allowed_domains = ['car.autohome.com.cn']
    start_urls = ['https://car.autohome.com.cn/pic/series/3788.html']

    # Pages that need parsing get a callback; since the gallery is
    # paginated, follow=True keeps the spider crawling the next pages.
    rules = (
        Rule(LinkExtractor(allow=r'https://car.autohome.com.cn/pic/series/3788.+'),
             callback='parse_page', follow=True),
    )

    def parse_page(self, response):
        catagory = response.xpath('//div[@class = "uibox"]/div/text()').get()
        srcs = response.xpath('//div[contains(@class,"uibox-con")]/ul/li//img/@src').getall()
        # map(func, seq) applies func to every element of seq and returns
        # the results: first strip the 't_' thumbnail prefix, then resolve
        # each src against the response URL
        srcs = list(map(lambda x: x.replace('t_', ''), srcs))
        srcs = list(map(lambda x: response.urljoin(x), srcs))
        yield GeelyItem(catagory=catagory, image_urls=srcs)
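To make the two map() calls concrete, here is the transformation on a made-up thumbnail src (the exact Autohome URL layout is my assumption, not taken from the page):

# Hypothetical thumbnail src as scraped from the gallery page:
src = '//car2.autoimg.cn/cardfs/product/g26/t_autohomecar__sample.jpg'
src = src.replace('t_', '')  # drop the 't_' thumbnail prefix -> full-size URL
print(src)  # '//car2.autoimg.cn/cardfs/product/g26/autohomecar__sample.jpg'
# response.urljoin() then fills in the scheme, yielding e.g.
# 'https://car2.autoimg.cn/cardfs/product/g26/autohomecar__sample.jpg'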
4. Write the pipeline:
import os
from urllib import request

class GeelyPipeline(object):
    def __init__(self):
        # os.path.dirname() gives this file's directory;
        # os.path.join() appends an 'images' sub-directory to it
        self.path = os.path.join(os.path.dirname(__file__), 'images')
        # create the directory if it does not exist yet
        if not os.path.exists(self.path):
            os.mkdir(self.path)

    def process_item(self, item, spider):
        # store the images grouped by category
        catagory = item['catagory']
        urls = item['image_urls']
        catagory_path = os.path.join(self.path, catagory)
        # create the category folder if it is missing
        if not os.path.exists(catagory_path):
            os.mkdir(catagory_path)
        for url in urls:
            # split on '_' and keep the last piece as the file name
            image_name = url.split('_')[-1]
            request.urlretrieve(url, os.path.join(catagory_path, image_name))
        return item
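One small caveat from me: os.mkdir() fails if the parent folder is missing, and it can race when several items arrive for the same new category at once. If you hit either problem, os.makedirs() with exist_ok=True (Python 3.2+) is a drop-in replacement:

import os

# os.makedirs() also creates missing parent directories and, with
# exist_ok=True, tolerates the folder already existing:
os.makedirs('images/exterior', exist_ok=True)  # 'images/exterior' is a made-up example path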
5. Edit settings.py:
BOT_NAME = 'Geely'

SPIDER_MODULES = ['Geely.spiders']
NEWSPIDER_MODULE = 'Geely.spiders'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

ITEM_PIPELINES = {
    'Geely.pipelines.GeelyPipeline': 1,
}
6. Run the project:
$ scrapy crawl BoYue
7. The results: an images directory appears in the project, with one sub-folder of downloaded photos per gallery category.
Now let's modify the project above to use Scrapy's built-in ImagesPipeline instead.
1. Modify settings.py:
import os

ITEM_PIPELINES = {
    # 'Geely.pipelines.GeelyPipeline': 1,
    # 'scrapy.pipelines.images.ImagesPipeline': 1,
    'Geely.pipelines.GeelyImagesPipeline': 1,
}

# project root directory
project_dir = os.path.dirname(__file__)
# where the downloaded images will be stored
IMAGES_STORE = os.path.join(project_dir, 'images')
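While we are in settings.py, ImagesPipeline has a few optional settings worth knowing about; the values below are just examples, not part of this project:

# Optional ImagesPipeline settings (example values):
IMAGES_EXPIRES = 90                   # skip re-downloading files newer than 90 days
IMAGES_THUMBS = {'small': (50, 50)}   # also generate 50x50 thumbnails
IMAGES_MIN_WIDTH = 110                # ignore images narrower than 110 px
IMAGES_MIN_HEIGHT = 110               # ... or shorter than 110 px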
2. Rewrite pipelines.py:
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os

from scrapy.pipelines.images import ImagesPipeline
from Geely import settings

# (the hand-written GeelyPipeline from step 4 is now removed;
# the ImagesPipeline subclass below replaces it)

# subclass ImagesPipeline
class GeelyImagesPipeline(ImagesPipeline):
    # called before the download requests go out; it is the method
    # that actually builds the download requests
    def get_media_requests(self, item, info):
        # delegate to the parent implementation via super(), then
        # attach the item to each request so file_path() can see it
        request_objects = super(GeelyImagesPipeline, self).get_media_requests(item, info)
        for request_object in request_objects:
            request_object.item = item
        return request_objects

    # called when an image is about to be stored; returns the path
    # (relative to IMAGES_STORE) the image will be saved under
    def file_path(self, request, response=None, info=None):
        path = super(GeelyImagesPipeline, self).file_path(request, response, info)
        catagory = request.item.get('catagory')
        # read IMAGES_STORE from the settings module
        images_store = settings.IMAGES_STORE
        catagory_path = os.path.join(images_store, catagory)
        # create the category directory if it does not exist
        if not os.path.exists(catagory_path):
            os.mkdir(catagory_path)
        image_name = path.replace('full/', '')
        image_path = os.path.join(catagory, image_name)
        return image_path
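A closing aside from me: attaching the item to each request works on any Scrapy version, but if you are on Scrapy 2.4 or newer, file_path() already receives the item as a keyword argument, so the same grouping can be written without overriding get_media_requests at all. A sketch under that version assumption:

import os
from scrapy.pipelines.images import ImagesPipeline

class GeelyImagesPipeline(ImagesPipeline):
    # Scrapy >= 2.4 passes the item straight into file_path();
    # returning a relative path lets Scrapy create the folders for us.
    def file_path(self, request, response=None, info=None, *, item=None):
        image_name = request.url.split('_')[-1]  # same naming rule as step 4
        return os.path.join(item['catagory'], image_name)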
3. Run the project:
$ scrapy crawl BoYue
You will get the same result as before!