將scrapy爬取的數據經過django存入SQLite數據庫

1. 在django項目根目錄位置建立scrapy項目,django_12是django項目,ABCkg是scrapy爬蟲項目,app1是django的子應用

 2.在Scrapy的settings.py中加入如下代碼

# Bridge Scrapy into the Django environment so the spider/pipeline code can
# use Django ORM models: make the project directory importable, point Django
# at the settings module, then bootstrap Django. Order matters — the
# DJANGO_SETTINGS_MODULE env var must be set before django.setup() runs.
import os
import sys
# NOTE(review): abspath('.') resolves against the CWD, not this file —
# confirm scrapy is always launched from the project directory.
sys.path.append(os.path.dirname(os.path.abspath('.')))
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_12.settings'    # <project name>.settings
import django
django.setup()

3.編寫爬蟲,下面代碼以ABCkg爲例,abckg.py

# -*- coding: utf-8 -*-
import scrapy
from ABCkg.items import AbckgItem

class AbckgSpider(scrapy.Spider):
    """Crawl list pages of www.abckg.com, follow each article link and the
    pagination link, and yield AbckgItem(title, url, content) items."""
    name = 'abckg'                           # spider name used by `scrapy crawl`
    allowed_domains = ['www.abckg.com']      # restrict the crawl scope
    start_urls = ['http://www.abckg.com/']   # first request

    def parse(self, response):
        """Parse one list page.

        :param response: the list-page response
        :return: yields one detail-page Request per article, plus one
                 Request for the next list page when it exists
        """
        print('返回內容:{}'.format(response))
        titles = response.xpath('//*[@id="container"]/div/div/h2/a/text()').extract()
        urls = response.xpath('//*[@id="container"]/div/div/h2/a/@href').extract()
        # zip pairs each title with its link and stops at the shorter list,
        # so uneven page markup can no longer raise IndexError (the original
        # indexed listurl with listtile's length).
        for title, url in zip(titles, urls):
            item = AbckgItem()
            item['title'] = title
            item['url'] = url
            yield scrapy.Request(url=url, callback=self.parse_content,
                                 method='GET', dont_filter=True,
                                 meta={'item': item})
        # Follow pagination. On the last page the xpath yields None and
        # scrapy.Request(url=None) would raise ValueError — guard it.
        nextpage = response.xpath('//*[@id="container"]/div[1]/div[10]/a[last()]/@href').extract_first()
        if nextpage:
            print('即將請求:{}'.format(nextpage))
            yield scrapy.Request(url=nextpage, callback=self.parse,
                                 method='GET', dont_filter=True)

    def parse_content(self, response):
        """Parse a detail page and complete the item started in parse()."""
        item = response.meta['item']
        # NOTE(review): the id "post-1192" looks specific to one article —
        # confirm it matches every detail page, otherwise content is empty.
        item['content'] = response.xpath('//*[@id="post-1192"]/dd/p').extract()
        print('內容爲:{}'.format(item))
        yield item

4.scrapy中item.py 中引入django模型類

      pip install scrapy-djangoitem

from app1 import models
from scrapy_djangoitem import DjangoItem

class AbckgItem(DjangoItem):
    """Scrapy item backed by the Django model app1.models.ABCkg.

    scrapy_djangoitem derives the item fields from the model's columns,
    so no explicit scrapy.Field() declarations are needed.
    """
    # Plain-scrapy equivalent would declare each field by hand:
    # name = scrapy.Field()           # ordinary scrapy item style
    # title = scrapy.Field()
    # url = scrapy.Field()
    # content = scrapy.Field()
    django_model = models.ABCkg     # attribute MUST be named django_model; binds this item to the Django model ABCkg

5.pipelines.py中調用save()

import json
from pymongo import MongoClient
# 用於接收parse函數發過來的item
class AbckgPipeline(object):
    """Persist scraped items through the Django ORM.

    A side-car file handle is opened only for the 'abckg' spider; the
    original code closed ``self.f`` unconditionally in close_spider, which
    raised AttributeError whenever any other spider finished.
    """

    def open_spider(self, spider):
        """Open the side-car file, but only for the 'abckg' spider."""
        if spider.name == 'abckg':
            # explicit encoding keeps output stable across platforms
            self.f = open('abckg.json', mode='w', encoding='utf-8')

    def process_item(self, item, spider):
        """Save the item via its Django model, then pass it on.

        :param item: a DjangoItem — save() inserts the row through the ORM
        :return: the item, so the next pipeline stage receives it
        """
        item.save()
        return item

    def close_spider(self, spider):
        """Close the side-car file if open_spider actually created it."""
        f = getattr(self, 'f', None)
        if f is not None:
            f.close()

6.在django中models.py中建立一個模型類,字段對應爬取到的數據,選擇適當的類型與長度

class ABCkg(models.Model):
    """Django model storing one scraped article: title, url and content."""
    # NOTE(review): the CharField lengths (30/100/200) look tight for real
    # titles/URLs/article bodies — confirm against scraped data; full article
    # content may need TextField instead.
    title = models.CharField(max_length=30,verbose_name='標題')
    url = models.CharField(max_length=100,verbose_name='網址')
    content = models.CharField(max_length=200,verbose_name='內容')
    class Meta:
        # Human-readable plural name shown in the Django admin.
        verbose_name_plural = '爬蟲ABCkg'
    def __str__(self):
        # Rows display by their title in admin/list views.
        return self.title

7.經過命令啓動爬蟲:scrapy crawl 爬蟲名稱

8.django進入admin後臺便可看到爬取到的數據。

相關文章
相關標籤/搜索