import requests
import re
import json
import time
from bs4 import BeautifulSoup
from pyquery import PyQuery as pq
from lxml import etree


# Fetch the page source
def get_one_page(url):
    try:
        headers = {
            # Spoof a browser User-Agent so the request is not rejected
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'
        }
        response = requests.get(url, headers=headers)  # send the GET request
        if response.status_code == 200:  # check the status code
            return response.text
        return None
    except requests.exceptions.RequestException:
        return None


# Extract the key fields from the source with a regular expression
def parse_one_page(html):
    # Regex capturing index, image, title, actors, release time, and score
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>.*?'
        r'integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        # A function containing a yield expression is a generator function:
        # calling it returns an iterator, which is advanced with next() or
        # send(msg). yield works like return, except the function remembers
        # where it stopped and resumes from there on the next iteration.
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2].strip(),
            'actor': item[3].strip()[3:],  # drop the "主演:" prefix
            'time': item[4].strip()[5:],   # drop the "上映時間:" prefix
            'score': item[5].strip() + item[6].strip()
        }


# Extract the fields with XPath
def xpath_demo(html):
    doc = etree.HTML(html)
    prefix = '//dd['
    for i in range(1, 11):  # XPath positions are 1-indexed, so dd[1]..dd[10]
        yield {
            'index': doc.xpath(prefix + str(i) + ']/i/text()'),
            'image': doc.xpath(prefix + str(i) + ']/a/img[@class="board-img"]/@data-src'),
            'title': doc.xpath(prefix + str(i) + ']//p/a[@data-act="boarditem-click"]/text()'),
            'actor': ''.join(doc.xpath(prefix + str(i) + ']//p[@class="star"]/text()')).strip(),
            'time': doc.xpath(prefix + str(i) + ']//p[@class="releasetime"]/text()'),
            'score': ''.join(doc.xpath(prefix + str(i) + ']//p[@class="score"]/i/text()')),
        }


# Extract the fields with BeautifulSoup (bs4)
def bs4_demo(html):
    soup = BeautifulSoup(html, 'lxml')
    for dd in soup.find_all(name='dd'):
        yield {
            'index': dd.find(name='i', attrs={'class': 'board-index'}).string.strip(),  # strip leading/trailing whitespace
            'image': dd.find(name='img', attrs={'class': 'board-img'})['data-src'],
            'title': dd.find(name='p', attrs={'class': 'name'}).string.strip(),
            'actor': dd.find(name='p', attrs={'class': 'star'}).string.strip(),
            'time': dd.find(name='p', attrs={'class': 'releasetime'}).string.strip(),
            'score': dd.find(name='i', attrs={'class': 'integer'}).string
                     + dd.find(name='i', attrs={'class': 'fraction'}).string
        }


# Filter the fields with pyquery CSS selectors
def pyquery_demo(html):
    doc = pq(html)
    for dd in doc('dd').items():
        yield {
            'index': dd.find('i.board-index').text(),            # get the text
            'image': dd.find('img.board-img').attr('data-src'),  # get an attribute
            'title': dd.find('p.name a').text(),
            'actor': dd.find('p.star').text(),
            'time': dd.find('p.releasetime').text(),
            'score': dd.find('p.score i.integer').text() + dd.find('p.score i.fraction').text()
        }


def write_to_file(content):
    with open('/Users/zz/Desktop/result.txt', 'a', encoding='utf-8') as f:
        # json.dumps() serializes the dict; ensure_ascii=False keeps non-ASCII
        # characters readable instead of escaping them
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def main(offset):
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    # for item in parse_one_page(html):
    # for item in bs4_demo(html):
    # for item in pyquery_demo(html):
    for item in xpath_demo(html):
        print(item)
        # write_to_file(item)  # write each record to a file


if __name__ == '__main__':  # run only when executed directly, not on import
    for i in range(10):
        main(offset=i * 10)
        time.sleep(1)  # throttle requests to avoid being blocked
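

# --- Generator behaviour, illustrated ---
# A minimal sketch, separate from the scraper, of the yield semantics the
# parsers above rely on: calling a generator function returns an iterator,
# next() resumes execution until the next yield, and send(msg) additionally
# passes a value back into the paused function. The name generator_demo is
# illustrative and not part of the original script.
def generator_demo():
    received = yield 'first'
    yield received  # echoes whatever send() passed in

# Usage (commented out so the script's own output stays clean):
# g = generator_demo()
# print(next(g))          # -> 'first'; runs up to the first yield
# print(g.send('hello'))  # -> 'hello'; resumes and passes a value in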
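

# --- Alternative XPath traversal (a sketch, not the original approach) ---
# xpath_demo builds positional expressions like //dd[3]. An equivalent and
# less error-prone pattern is to select the <dd> nodes once and run relative
# XPath queries on each node, which avoids hard-coding the 1-10 range.
def xpath_nodes_demo(html):
    doc = etree.HTML(html)
    for dd in doc.xpath('//dd'):
        yield {
            'index': dd.xpath('./i/text()'),
            'image': dd.xpath('./a/img[@class="board-img"]/@data-src'),
            'title': dd.xpath('.//p/a[@data-act="boarditem-click"]/text()'),
            'actor': ''.join(dd.xpath('.//p[@class="star"]/text()')).strip(),
            'time': dd.xpath('.//p[@class="releasetime"]/text()'),
            'score': ''.join(dd.xpath('.//p[@class="score"]/i/text()')),
        }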
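

# --- Reading the results back (sketch) ---
# write_to_file appends one JSON object per line (the "JSON Lines" layout).
# A matching reader, assuming the same path as above, parses each line
# independently; read_results is a hypothetical helper, not part of the
# original script.
def read_results(path='/Users/zz/Desktop/result.txt'):
    with open(path, encoding='utf-8') as f:
        return [json.loads(line) for line in f if line.strip()]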