Error: #include 'xml/xmlversion.h' not found
$ xcode-select --install
$ scrapy startproject my_crawler
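The startproject command generates a skeleton project; the layout typically looks like this (exact files vary slightly between Scrapy versions):

my_crawler/
    scrapy.cfg              # deploy configuration
    my_crawler/             # the project's Python package
        __init__.py
        items.py            # item definitions (edited below)
        pipelines.py        # item pipelines (edited below)
        settings.py         # project settings (edited below)
        spiders/            # spider modules (the crawler below goes here)
            __init__.py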
# -*- coding: utf-8 -*-
import scrapy

class MyCrawlerItem(scrapy.Item):
    title = scrapy.Field()    # article title
    url = scrapy.Field()      # article URL
    summary = scrapy.Field()  # article summary
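A MyCrawlerItem behaves like a dict with a fixed set of keys; a quick interactive sketch:

>>> from my_crawler.items import MyCrawlerItem
>>> item = MyCrawlerItem(title='Hello', url='http://www.bjhee.com')
>>> item['title']
'Hello'
>>> item['author'] = 'x'   # undeclared field: raises KeyError

Assigning to a field that was never declared raises KeyError, which is why the fields are declared up front.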
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from my_crawler.items import MyCrawlerItem

class MyCrawlSpider(CrawlSpider):
    name = 'my_crawler'              # spider name; must be unique, used when running the crawl command
    allowed_domains = ['bjhee.com']  # restrict crawling to these domains; more than one may be listed
    start_urls = [
        "http://www.bjhee.com",      # seed URL(s); more than one may be listed
    ]

    rules = (   # map URL patterns to parse callbacks; more than one rule may be listed
        Rule(LinkExtractor(allow=r'/page/[0-9]+'),  # regex for URLs that may be followed
             callback='parse_item',                 # name of the callback used to parse matching pages
             follow=True
        ),
    )

    def parse_item(self, response):
        # locate the article DOM nodes via XPath
        articles = response.xpath('//*[@id="main"]/ul/li')
        for article in articles:
            item = MyCrawlerItem()
            item['title'] = article.xpath('h3[@class="entry-title"]/a/text()').extract()[0]
            item['url'] = article.xpath('h3[@class="entry-title"]/a/@href').extract()[0]
            item['summary'] = article.xpath('div[2]/p/text()').extract()[0]
            yield item
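Before running the spider, the XPath expressions in parse_item can be tried out interactively with scrapy shell; a quick sketch against the seed URL:

$ scrapy shell "http://www.bjhee.com"
>>> articles = response.xpath('//*[@id="main"]/ul/li')
>>> len(articles)                                                          # number of article nodes on the page
>>> articles[0].xpath('h3[@class="entry-title"]/a/text()').extract()[0]    # title of the first article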
$ scrapy crawl my_crawler
$ scrapy crawl my_crawler -o my_crawler.json -t json
# -*- coding: utf-8 -*-
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem

class MyCrawlerPipeline(object):
    def __init__(self):
        # set up the MongoDB connection
        connection = pymongo.Connection(
            settings['MONGO_SERVER'],
            settings['MONGO_PORT']
        )
        db = connection[settings['MONGO_DB']]
        self.collection = db[settings['MONGO_COLLECTION']]

    # process every scraped MyCrawlerItem
    def process_item(self, item, spider):
        valid = True
        for data in item:
            if not item[data]:  # drop items that contain an empty field
                valid = False
                raise DropItem("Missing {0}!".format(data))
        if valid:
            # self.collection.insert(dict(item)) would also work; upsert avoids duplicate records
            self.collection.update({'url': item['url']}, dict(item), upsert=True)
        return item
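Note that pymongo.Connection was removed in pymongo 3.x (replaced by MongoClient), and scrapy.conf.settings was later dropped in favor of receiving settings through from_crawler. A minimal sketch of the same pipeline adapted to those newer versions, reusing the MONGO_* settings defined below:

# Sketch only: the pipeline above rewritten for pymongo 3.x and current Scrapy.
import pymongo
from scrapy.exceptions import DropItem

class MyCrawlerPipeline(object):
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy hands the project settings to the pipeline here
        return cls(crawler.settings)

    def __init__(self, settings):
        client = pymongo.MongoClient(settings['MONGO_SERVER'], settings['MONGO_PORT'])
        db = client[settings['MONGO_DB']]
        self.collection = db[settings['MONGO_COLLECTION']]

    def process_item(self, item, spider):
        for field in item:
            if not item[field]:  # drop items that contain an empty field
                raise DropItem("Missing {0}!".format(field))
        # replace_one(..., upsert=True) is the pymongo 3.x equivalent of the update() call above
        self.collection.replace_one({'url': item['url']}, dict(item), upsert=True)
        return item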
ITEM_PIPELINES = {
    'my_crawler.pipelines.MyCrawlerPipeline': 300,  # enabled pipelines; more than one may be listed, the value is the execution order
}

# MongoDB connection settings
MONGO_SERVER = 'localhost'
MONGO_PORT = 27017
MONGO_DB = 'bjhee'
MONGO_COLLECTION = 'articles'

DOWNLOAD_DELAY = 2    # add some delay (in seconds) if the network is slow
$ scrapy crawl my_crawler
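Once the crawl finishes, the stored documents can be spot-checked directly with pymongo; a small sketch, assuming pymongo 3.x and the connection values from settings.py:

import pymongo

client = pymongo.MongoClient('localhost', 27017)
collection = client['bjhee']['articles']
print(collection.count_documents({}))       # how many articles were stored
for doc in collection.find().limit(5):      # peek at a few of them
    print(doc['title'], doc['url'])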