This project started out of personal interest; the original goal was to make it work out of the box for friends with no coding experience.
Reading this article requires a little knowledge of Scrapy and PyQt. It only shares a summary of the approach for discussion; read the source code if you need more detail, and feel free to open an issue.
The base class encapsulates the methods the framework needs. The framework targets sites with three levels of pages (title - section - detail page), and the branching of its internal methods is built around the interaction model.
The GUI passes parameters and starts the backend >> the spider starts working from the overridden start_requests >> after parse and the other response-handling methods it suspends and waits for a selection.
The execution order is (1) parse -- frame_book --> (2) parse_section -- frame_section --> (3) yield item. The frame methods are explained below.
The pipeline handles the final work on each item (downloading, renaming, etc.). At that point the spider has completed one life cycle; it sends an end signal and control returns to the GUI.
下面講解scrapy的各塊工做,pickup有點意思的部分緩存
class BaseComicSpider(scrapy.Spider):
    """Overrides start_requests"""
    step = 'loop'
    current_status = {}
    print_Q = None
    current_Q = None
    step_Q = None
    bar = None            # these and the variables above are all interaction signals
    total = 0             # item counter, explained in the pipeline section
    search_url_head = NotImplementedError('須要自定義搜索網址')
    mappings = {'': ''}   # mappings: custom keyword-to-URL map
    # ……………………

    def parse(self, response):
        frame_book_results = self.frame_book(response)
        yield scrapy.Request(url=title_url, ………………)

    def frame_book(self, response) -> dict:
        raise NotImplementedError

    def elect_res(self, elect: list, frame_results: dict, **kw) -> list:
        # encapsulated method: applies (1) the selection `elect` to (2) the results formatted by the frame method ->
        # -> returns data shaped like [[elected_title1, title1_url], [title2, title2_url]……]
        pass
    # ……………………

    def close(self, reason):
        # ……… close the pipeline, session and other resources
        self.print_Q.put('結束信號')  # the spider's life cycle ends here
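The suspend-and-wait part of the interaction is elided above; a minimal sketch of what the overridden start_requests and parse could look like, assuming current_Q carries the GUI's keyword and choice as the dicts sent by params_send later in this post (the flow and any names beyond the base-class attributes are assumptions, not the project's actual code):

def start_requests(self):
    # hypothetical: block until the GUI sends a search keyword
    keyword = self.current_Q.get()['keyword']
    url = self.mappings.get(keyword, self.search_url_head + str(keyword))
    yield scrapy.Request(url, dont_filter=True)

def parse(self, response):
    # hypothetical: show the formatted results, then suspend until the GUI sends a choice
    frame_book_results = self.frame_book(response)
    elect = self.current_Q.get()['choose']
    for title, title_url in self.elect_res(elect, frame_book_results):
        yield scrapy.Request(url=title_url, callback=self.parse_section)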
An example that runs in the background: for a simple two-level site, only the two frame methods need to be overridden, which corresponds to the extended base class BaseComicSpider2.
The frame methods locate the target elements, clean the data on the fly and return it to the front end for display.
class ComicxxxSpider(BaseComicSpider2):
    name = 'comicxxx'
    allowed_domains = ['m.xxx.com']
    search_url_head = 'http://m.xxx.com/search/?keywords='
    mappings = {'更新': 'http://m.xxx.com/update/', '排名': 'http://m.xxx.com/rank/'}

    def frame_book(self, response):
        # ……………………
        title = target.xpath(title_xpath).get().strip()
        self.print_Q.put(example_b.format(str(x + 1), title))  # send a print signal to the front end for streaming display

    def frame_section(self, response):
        pass  # same idea as frame_book above
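For reference, a hedged sketch of what a concrete frame_book might return, assuming the {index: [title, url]} shape implied by elect_res; the XPath expressions and the example_b template here are placeholders, not taken from the project:

def frame_book(self, response):
    example_b = '[ {} ]  {}'    # placeholder display template
    results = {}
    for x, target in enumerate(response.xpath('//ul[@class="book-list"]/li/a')):
        title = target.xpath('./text()').get().strip()
        url = response.urljoin(target.xpath('./@href').get())
        results[x + 1] = [title, url]
        self.print_Q.put(example_b.format(str(x + 1), title))  # stream each hit to the GUI
    return results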
The customized part of settings.py is deployment-related: it uses the utils toolset to read the config file and build these variables.
IMAGES_STORE, log_path, PROXY_CUST, LOG_LEVEL = get_info()
os.makedirs(f'{log_path}', exist_ok=True)  # log output directory
LOG_FILE = f"{log_path}/scrapy.log"
SPECIAL = ['xxxxx']
At first I did not know how to handle the progress bar; after skimming the source of the Pipeline classes I found that the image_downloaded method was the closest fit.
def file_path(self, request, response=None, info=None):
    """Called before a downloaded image is stored. The default name is the md5 of the url;
    here it is replaced with a custom, ordered naming scheme."""
    title = sub(r'([|.:<>?*"\\/])', '-', request.item.get('title'))      # sanitize illegal path characters
    section = sub(r'([|.:<>?*"\\/])', '-', request.item.get('section'))
    page = '第%s頁.jpg' % request.item.get('page')
    spider = self.spiderinfo.spider                   # the settings.py parameters are used here
    basepath = spider.settings.get('IMAGES_STORE')
    path = f"{basepath}\\特殊\\{title}" if spider.name in spider.settings.get(
        'SPECIAL') else f"{basepath}\\{title}\\{section}\\"
    os.makedirs(path, exist_ok=True)
    return os.path.join(path, page)

def image_downloaded(self, response, request, info):
    """Inherited ImagesPipeline hook called when an image (file) finishes downloading;
    the dynamic progress bar update is implemented here."""
    self.now += 1                                     # (ComicPipeline) self.now is the number processed so far
    spider = self.spiderinfo.spider
    percent = int((self.now / spider.total) * 100)    # spider.total is the total number of items
    if percent > self.threshold:
        percent -= int((percent / self.threshold) * 100)   # slow the progress down (algorithm to be improved)
    spider.bar.put(int(percent))                      # push the percentage back to the GUI
    super(ComicPipeline, self).image_downloaded(response=response, request=request, info=info)
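The hook that attaches the item to each media request is not shown in the post; a minimal sketch of what it could look like, assuming the standard ImagesPipeline get_media_requests hook and an image_urls field on the item (both assumptions on my part):

def get_media_requests(self, item, info):
    # hypothetical: attach the item to every request so file_path / image_downloaded
    # above can read title / section / page through request.item
    for url in item.get('image_urls', []):
        request = scrapy.Request(url)
        request.item = item
        yield request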
其餘:Items與Middlewares要點很少,略過
按鍵邏輯:槽函數實現,內部實現必定量的按鈕禁用方法引導操做
Windows and information
The main window's textbrowser streams the main data; the other windows are embedded in the whole and skipped here.
The help button shows general instructions, and the status bar at the bottom uses setStatusTip to give friendly hints during each operation.
The progress bar is tied to the pipeline's signal output.
An excerpt of the slot function behind the Next button:
def next_schedule(self):
    def start_and_search():
        self.log.debug('===--→ -*- searching')
        self.next_btn.setText('Next')
        keyword = self.searchinput.text()[6:].strip()
        index = self.chooseBox.currentIndex()

        if self.nextclickCnt == 0:      # avoid recreating the thread when coming back from the section step to the parse step
            self.bThread = WorkThread(self)

            def crawl_btn(text):
                if len(text) > 5:
                    self.crawl_btn.setEnabled(self.step_recv() == 'parse section')
                    self.next_btn.setDisabled(self.crawl_btn.isEnabled())

            self.chooseinput.textChanged.connect(crawl_btn)

            self.p = Process(target=crawl_what, args=(index, self.print_Q, self.bar, self.current_Q, self.step_Q))
            self.bThread.print_signal.connect(self.textbrowser_load)
            self.bThread.item_count_signal.connect(self.processbar_load)
            self.bThread.finishSignal.connect(self.crawl_end)
            self.p.start()
            self.bThread.start()
            self.log.info(f'-*-*- Background thread starting')

        self.chooseBox.setDisabled(True)
        self.params_send({'keyword': keyword})
        self.log.debug(f'website_index:[{index}], keyword [{keyword}] success ')

    def _next():
        self.log.debug('===--→ nexting')
        self.judge_retry()      # when not retrying, set retry=False first to unlock the spider's next step
        choose = judge_input(self.chooseinput.text()[5:].strip())
        if self.nextclickCnt == 1:
            self.book_choose = choose   # if 0 is chosen, the spider has to return the number of books here
            self.book_num = len(self.book_choose)
            if self.book_num > 1:
                self.log.info('book_num > 1')
                self.textBrowser.append(self.warning_(f'警告!!多選書本時不要隨意使用 retry<br>'))
        self.chooseinput.clear()
        # the choose logic is handled by the schedules of the crawl, next and retry buttons
        self.params_send({'choose': choose})
        self.log.debug(f'send choose: {choose} success')

    self.retrybtn.setEnabled(True)
    if self.next_btn.text() != '搜索':
        _next()
    else:
        start_and_search()
    self.nextclickCnt += 1
    self.searchinput.setEnabled(False)
    self.chooseinput.setFocusPolicy(Qt.StrongFocus)

    self.step_recv()    # the wrapped handler for self.step_Q
    self.log.debug(f"===--→ next_schedule end (now step: {self.step})\n")
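params_send and step_recv are used above but not shown; a plausible sketch, assuming they are thin wrappers around the shared queues (the bodies are guesses, not the project's code):

def params_send(self, params: dict):
    # hypothetical wrapper: push GUI parameters ({'keyword': ...} / {'choose': ...}) to the spider process
    self.current_Q.put(params)

def step_recv(self):
    # hypothetical wrapper: read the spider's current step, if one is waiting, and remember it
    if not self.step_Q.empty():
        self.step = self.step_Q.get()
    return self.step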
The method that creates the background crawler process, called by start_and_search() in the UI main thread's Next logic above:
def crawl_what(index, print_Q, bar, current_Q, step_Q):
    spider_what = {1: 'comic1', 2: 'comic2', 3: 'comic3'}
    freeze_support()
    process = CrawlerProcess(get_project_settings())
    process.crawl(spider_what[index], print_Q=print_Q, bar=bar, current_Q=current_Q, step_Q=step_Q)
    process.start()
    process.join()
    process.stop()
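The queues handed to crawl_what are shared with the spider process, so they need to be multiprocessing queues created on the GUI side; a minimal sketch of that setup (the attribute names follow the usage above, the rest is an assumption):

from multiprocessing import Queue

# hypothetical initialisation inside the main window's __init__
self.print_Q = Queue()    # spider -> GUI: text streamed into the textbrowser
self.bar = Queue()        # spider -> GUI: progress percentages from the pipeline
self.current_Q = Queue()  # GUI -> spider: keyword and book/section choices
self.step_Q = Queue()     # spider -> GUI: the spider's current step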
分離UI主線程與工做線程(項目代碼中此處可整合爬蟲進程一塊兒)
class WorkThread(QThread):
    item_count_signal = pyqtSignal(int)
    print_signal = pyqtSignal(str)
    finishSignal = pyqtSignal(str)
    active = True

    def __init__(self, gui):
        super(WorkThread, self).__init__()
        self.gui = gui

    def run(self):
        while self.active:
            self.msleep(8)
            if not self.gui.print_Q.empty():
                self.msleep(8)
                self.print_signal.emit(str(self.gui.print_Q.get()))
            if not self.gui.bar.empty():
                self.item_count_signal.emit(self.gui.bar.get())
                self.msleep(10)
            if '完成任務' in self.gui.textBrowser.toPlainText():
                self.item_count_signal.emit(100)
                self.msleep(20)
                break
        if self.active:
            from ComicSpider.settings import IMAGES_STORE
            self.finishSignal.emit(IMAGES_STORE)
Resource handling tools
The utils.py toolset:
def get_info():
    with open(f'./setting.txt', 'r', encoding='utf-8') as fp:
        text = fp.read()
        sv_path = re.search('<([\s\S]+)>', text).group(1)
        level = re.search('(DEBUG|WARNING|ERROR)', text).group(1)
        # ………………

def cLog(name, level='INFO', **kw) -> Logger:
    # reads setting.txt in the same way
    level = re.search('(DEBUG|WARNING|ERROR)', text).group(1)

def judge_input(_input: str) -> list:  # this method feels quite handy to use
    """
    "6" return [6]  //  "1+3+5" return [1,3,5]
    "4-6" return [4,5,6]  //  "1+4-6" return [1,4,5,6]
    """
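The body of judge_input is elided; a minimal implementation that matches the docstring above could look like this (a reconstruction, not the project's code):

def judge_input(_input: str) -> list:
    # "6" -> [6]   "1+3+5" -> [1, 3, 5]   "4-6" -> [4, 5, 6]   "1+4-6" -> [1, 4, 5, 6]
    result = []
    for part in _input.split('+'):
        if '-' in part:
            start, end = part.split('-')
            result.extend(range(int(start), int(end) + 1))
        else:
            result.append(int(part))
    return result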
Deployment is simply packaging into an exe with pyinstaller.
Points to note about pyinstaller:
datas: in each entry, the first value is the file's current location in the project and the second is its location at run time; be careful with the ('.', '.') trick taught online, since misusing it can make the git repo balloon in size.
console: set it to True to make debugging easier (this is related to the module-import debugging mentioned above).
Reference spec:
# -*- mode: python -*-
block_cipher = None

a = Analysis(['crawl_go.py'],
             pathex=['D:\\xxxxxxxxxxxxxxxx\\ComicSpider'],
             binaries=[],
             datas=[('D:\python\Lib\site-packages\scrapy\mime.types', 'scrapy'),
                    ('D:\python\Lib\site-packages\scrapy\VERSION', 'scrapy'),
                    ('./ComicSpider', 'ComicSpider'),
                    ('./GUI', 'GUI'),
                    ('./gui.py', '.'),
                    ('./material_ct.py', '.'),
                    ('./utils.py', '.'),
                    ],  # -*-
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='ComicSpider',
          debug=True,  # -*-
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=True,
          icon='exe.ico')  # -*-
Directory tree after packaging:
├── ComicSpider.exe
├── log
│   ├── GUI.log
│   └── scrapy.log
├── scrapy.cfg    # tested: bundling scrapy.cfg into the exe has no effect, presumably related to the cache path; leaving it outside does no harm
├── setting.txt
Scrapy works quite well for this kind of single-machine interactive use; on the PyQt side I have only scratched the surface, and the logic feels a bit inelegant, so I plan to study how to apply the strategy pattern later.
Everyone is welcome to visit the project, try it out and share feedback.