Types of crawlers
Anti-crawl mechanisms: e.g. UA detection (seen in the NMPA example below)
Anti-anti-crawl strategies: e.g. UA spoofing, sketched below
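A minimal sketch of the most common pairing in these notes: the server inspects the User-Agent header (UA detection), and the crawler defeats that by sending a real browser's UA string (UA spoofing). The UA value is the same Chrome string reused in the examples further down.

import requests

# without this header, requests identifies itself as 'python-requests/x.y'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
response = requests.get('https://www.sogou.com/', headers=headers)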
robots.txt protocol: a crawler may comply or not comply (the protocol is advisory; nothing enforces it)
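Whichever choice the crawler makes, the standard library can at least read the protocol. A minimal sketch using urllib.robotparser (the sogou.com URLs are just placeholders):

from urllib import robotparser

rp = robotparser.RobotFileParser()
rp.set_url('https://www.sogou.com/robots.txt')
rp.read()  # fetch and parse the site's robots.txt
# True if the rules allow a crawler named '*' to fetch this URL
print(rp.can_fetch('*', 'https://www.sogou.com/web?query=test'))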
Workflow for writing requests-based code: specify the URL -> send the request -> get the response data -> persist it (the four numbered steps in the example below)
# Crawl the Sogou home page with a Python crawler
import requests

# 1. specify the URL
url = 'https://www.sogou.com/'
# 2. send the request
response = requests.get(url=url)
# 3. get the response data as text
page_text = response.text
# 4. persist it
with open('./sogou.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
# Goal: crawl the result page for a user-supplied Sogou search term
import requests

url = 'https://www.sogou.com/web'
# wrap the query parameters
wd = input('enter a word:')
param = {
    'query': wd
}
response = requests.get(url=url, params=param)
page_text = response.content
fileName = wd + '.html'
with open(fileName, 'wb') as fp:
    fp.write(page_text)
print('over')
# Crawl Baidu Translate suggestion results
import requests

url = 'https://fanyi.baidu.com/sug'
wd = input('enter a word:')
data = {
    'kw': wd
}
response = requests.post(url=url, data=data)
print(response.json())

# response.text : str
# response.content : bytes (binary)
# response.json() : deserialized object
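To make the three accessors above concrete, a small sketch against a throwaway JSON endpoint (httpbin.org is an assumption here; any JSON URL works):

import requests

resp = requests.get('https://httpbin.org/json')
s = resp.text      # str: the body decoded to text
b = resp.content   # bytes: the raw body
obj = resp.json()  # dict/list: the body parsed as JSON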
# Crawl movie details from the Douban category chart: https://movie.douban.com/
import requests

url = 'https://movie.douban.com/j/chart/top_list'
param = {
    "type": "5",
    "interval_id": "100:90",
    "action": '',
    "start": "60",
    "limit": "100",
}
movie_data = requests.get(url=url, params=param).json()
print(movie_data)
# Goal: crawl cosmetics production licence data from the NMPA (China's National
# Medical Products Administration): http://125.35.6.84:81/xk/
# Anti-crawl mechanism: UA detection --> countered by UA spoofing
import requests

url = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXkzsList'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
# first collect every licence ID from the paginated list endpoint
id_list = []
for page in range(1, 11):
    data = {
        "on": "true",
        "page": str(page),
        "pageSize": "15",
        "productName": "",
        "conditionType": "1",
        "applyname": "",
        "applysn": "",
    }
    json_data = requests.post(url=url, data=data, headers=headers).json()
    for dic in json_data['list']:
        id_list.append(dic['ID'])

# then fetch the detail record for each ID
detail_url = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXkzsById'
for id in id_list:
    detail_data = {
        'id': id
    }
    detail_json = requests.post(url=detail_url, data=detail_data, headers=headers).json()
    print(detail_json)
# Crawl a single image (binary data)
import requests

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'}
url = 'https://ss2.bdstatic.com/70cFvnSh_Q1YnxGkpoWK1HF6hhy/it/u=806201715,3137077445&fm=26&gp=0.jpg'
img_data = requests.get(url=url, headers=headers).content
with open('./xiaohua.jpg', 'wb') as fp:
    fp.write(img_data)
# The same download via urllib (note: the submodule must be imported explicitly)
import urllib.request

url = 'https://ss2.bdstatic.com/70cFvnSh_Q1YnxGkpoWK1HF6hhy/it/u=806201715,3137077445&fm=26&gp=0.jpg'
urllib.request.urlretrieve(url=url, filename='./123.jpg')
import re

string = '''fall in love with you
i love you very much
i love she
i love her'''

# re.M: '^' matches at the start of every line, not just the start of the string
re.findall('^i.*', string, re.M)
#####################################################################
# match across all lines: with re.S, '.' also matches '\n', so '.*' spans everything
string1 = """細思極恐
你的隊友在看書
你的敵人在磨刀
你的閨蜜在減肥
隔壁老王在練腰
"""
re.findall('.*', string1, re.S)
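A side-by-side sketch of what the two flags actually change, on a toy two-line string:

import re

s = 'a1\nb2'
re.findall(r'^\w', s)        # ['a']      - '^' anchors only at the start of the string
re.findall(r'^\w', s, re.M)  # ['a', 'b'] - re.M: '^' anchors at the start of every line
re.findall(r'1.b', s)        # []         - '.' refuses to match '\n'
re.findall(r'1.b', s, re.S)  # ['1\nb']   - re.S: '.' matches '\n' too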
# Crawl every image from a page range of qiushibaike's picture section
import requests
import re
import urllib.request
import os

url = 'https://www.qiushibaike.com/pic/page/%d/?s=5170552'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
if not os.path.exists('./qiutu'):
    os.mkdir('./qiutu')

start_page = int(input('enter a start pageNum:'))
end_page = int(input('enter a end pageNum:'))
for page in range(start_page, end_page + 1):
    new_url = url % page
    page_text = requests.get(url=new_url, headers=headers).text
    # non-greedy capture of each thumbnail's src attribute
    img_url_list = re.findall('<div class="thumb">.*?<img src="(.*?)" alt=.*?</div>', page_text, re.S)
    for img_url in img_url_list:
        img_url = 'https:' + img_url
        imgName = img_url.split('/')[-1]
        imgPath = 'qiutu/' + imgName
        urllib.request.urlretrieve(url=img_url, filename=imgPath)
        print(imgPath, 'downloaded!')
print('over!!!')
bs4 parsing: 1. pip install bs4  2. pip install lxml
Parsing workflow: instantiate a BeautifulSoup object with the page source, then locate tags and extract data through the object's methods and attributes, as sketched below.
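A quick-reference sketch of the bs4 calls the chapter crawler below relies on (the markup here is a made-up snippet):

from bs4 import BeautifulSoup

html = '<div class="book-mulu"><ul><li><a href="/book/1.html">Chapter 1</a></li></ul></div>'
soup = BeautifulSoup(html, 'lxml')
soup.select('.book-mulu > ul > li > a')    # CSS selector -> list of matching tags
a = soup.find('a')                         # first matching tag
a.string                                   # the tag's direct text
a['href']                                  # attribute lookup
soup.find('div', class_='book-mulu').text  # all text nested under the tag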
# Crawl the full text of "Romance of the Three Kingdoms" from shicimingju
import requests
from bs4 import BeautifulSoup

url = 'http://www.shicimingju.com/book/sanguoyanyi.html'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
page_text = requests.get(url=url, headers=headers).text
soup = BeautifulSoup(page_text, 'lxml')
# each chapter link sits in the .book-mulu table of contents
a_list = soup.select('.book-mulu > ul > li > a')
fp = open('sanguo.txt', 'w', encoding='utf-8')
for a in a_list:
    title = a.string
    detail_url = 'http://www.shicimingju.com' + a['href']
    detail_page_text = requests.get(url=detail_url, headers=headers).text
    soup = BeautifulSoup(detail_page_text, 'lxml')
    content = soup.find('div', class_='chapter_content').text
    fp.write(title + '\n' + content)
    print(title, 'done')
print('over')
fp.close()