Example 1: a focused crawler for Chinese university rankings
Functional description
Input: the URL of a university ranking page
Output: the ranking information printed to the screen (rank, university name, total score)
Technical approach: requests + bs4
Focused crawler: crawls only the given URL; it does not follow links to other pages
Program structure
Step 1: fetch the ranking page from the web — getHTMLText()
Step 2: extract the information on the page into a suitable data structure — fillUnivList()
Step 3: use that data structure to display and print the results — printUnivList()
Besides the rank itself, each entry carries a university's basic information, so the data is naturally two-dimensional; a two-dimensional list (a list of rows) is used to organize it, as sketched below.
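A minimal sketch (with made-up rows) of the two-dimensional list that fillUnivList() fills in:

ulist = [
    ["1", "清華大學", "95.9"],  # one row per university: [rank, name, total score]
    ["2", "北京大學", "82.6"],
]
print(ulist[0][1])  # index by row, then by column -> 清華大學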
import requests
from bs4 import BeautifulSoup
import bs4

def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""

def fillUnivList(ulist, html):
    soup = BeautifulSoup(html, "html.parser")
    for tr in soup.find('tbody').children:
        # Filter out children that are not bs4 Tag objects (e.g. the
        # NavigableString newlines between <tr> tags).
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')
            ulist.append([tds[0].string, tds[1].string, tds[3].string])

def printUnivList(ulist, num):
    # Unoptimized version: pads with the default halfwidth space, so columns
    # that mix Chinese and Western characters drift out of alignment.
    # print("{:^10}\t{:^6}\t{:^10}".format("排名", "學校名稱", "總分"))
    # for i in range(num):
    #     u = ulist[i]
    #     print("{:^10}\t{:^6}\t{:^10}".format(u[0], u[1], u[2]))
    # print("Suc" + str(num))
    # Optimized for mixed Chinese/English output: {3} means the school-name
    # column is padded with format()'s fourth argument, the fullwidth
    # (Chinese) space chr(12288), so Chinese text stays aligned.
    tplt = "{0:^10}\t{1:{3}^10}\t{2:^10}"
    print(tplt.format("排名", "學校名稱", "總分", chr(12288)))
    for i in range(num):
        u = ulist[i]
        print(tplt.format(u[0], u[1], u[2], chr(12288)))
    print("Suc" + str(num))

def main():
    uinfo = []
    url = "http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html"
    html = getHTMLText(url)
    fillUnivList(uinfo, html)
    printUnivList(uinfo, 20)  # print the top 20 universities

main()
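The isinstance() check above matters because tbody.children yields more than <tr> tags: the whitespace between tags comes back as NavigableString nodes, which cannot be indexed like tags. A self-contained demonstration on a made-up table fragment:

from bs4 import BeautifulSoup
import bs4

soup = BeautifulSoup("<tbody>\n<tr><td>1</td></tr>\n</tbody>", "html.parser")
for child in soup.tbody.children:
    # Prints NavigableString for each newline and Tag for the <tr> row.
    print(type(child).__name__, isinstance(child, bs4.element.Tag))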
Example 2: a focused crawler for Taobao product information
Functional description
Goal: fetch Taobao search result pages and extract the product names and prices
Understanding: Taobao's search interface
Handling pagination (the paging URL pattern is sketched after this list)
Technical approach: requests + re
Feasibility of the focused crawler
Program structure
Step 1: submit a product search request and fetch each result page in a loop
Step 2: for each page, extract the product name and price information
Step 3: print the information to the screen
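A minimal sketch of the paging pattern the code relies on: the search keyword goes in the q parameter, and each result page holds 44 items (an assumption inferred from the 44*i step in the code below), so page i starts at offset s = 44*i:

goods = '書包'
start_url = 'https://s.taobao.com/search?q=' + goods
for i in range(2):
    # page 0 -> ...&s=0, page 1 -> ...&s=44
    print(start_url + '&s=' + str(44 * i))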
Version 1: the cookie string copied from your own logged-in browser session is passed directly as a request header.

import requests
import re

def getHTMLTest(url):
    try:
        # coo is your own cookie string, copied from the browser after
        # logging in to Taobao and searching for the product (here 書包).
        coo = 'thw=cn; cna=24t1FdsskikCAXEteKSVn8yS; v=0; t=6615fa5b788406278f02379f51d55807; \
cookie2=55a650e680a8e140771936b04cb56e95; _tb_token_=f68134b3ee336; unb=763489836; \
uc3=nk2=oAY%2Bx%2FHWV8vidQ%3D%3D&id2=VAcN5rR6zAjv&vt3=F8dBy32junLF5eJpjDs%3D&lg2=VT5L2FSpMGV7TQ%3D%3D;\
csg=8ec2be45; lgc=%5Cu7709%5Cu770B%5Cu6708%5Cu4E4B%5Cu591C; cookie17=VAcN5rR6zAjv;\
dnk=%5Cu7709%5Cu770B%5Cu6708%5Cu4E4B%5Cu591C; skt=e8b0543f48622a97; existShop=MTU2NDkwODMzOQ%3D%3D;\
uc4=id4=0%40Vh5PI3jEh3Oc2p0oDeG%2Fvw4ylAo%3D&nk4=0%40olT0l5EQsQIZXSm9RQUe%2FzVpfGL%2F;\
tracknick=%5Cu7709%5Cu770B%5Cu6708%5Cu4E4B%5Cu591C; _cc_=VT5L2FSpdA%3D%3D; tg=0; _l_g_=Ug%3D%3D;\
sg=%E5%A4%9C6f; _nk_=%5Cu7709%5Cu770B%5Cu6708%5Cu4E4B%5Cu591C;\
cookie1=Vv6fkO6X3Dbd0%2BjR5Pm9%2FVMegu88LAEuGgMSjoFaFFg%3D;\
enc=aBauooIlET%2FTz%2FO%2By206HZzzoZUzq%2BmM220DoSa8xXJAwE9%2FtIJe5hfuwu12e9GfpcG%2F9ZNzpm6JBo%2F2D%2BNsig%3D%3D; \
mt=ci=110_1; hng=CN%7Czh-CN%7CCNY%7C156; swfstore=308335;\
x=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0%26__ll%3D-1%26_ato%3D0;\
uc1=cookie14=UoTaHPGk7cSIQw%3D%3D&cookie15=V32FPkk%2Fw0dUvg%3D%3D;\
whl=-1%260%260%261564908848429; JSESSIONID=9A789A993ECB09BAABEF6E4A29CC2510; \
l=cBgxe_J4qUKhmO7bBOCg5uI8LO7OSIRA_uPRwCVXi_5Ba6Ls0d_Ok7JG-Fp6VjWd90TB4dG4psy9-etkiKy06Pt-g3fP.;\
isg=BFtbb-bCgqTERv7QpzRjXQhf6r8FmG5bzZ8Il02YN9pxLHsO1QD_gnmuxswHC8cq'
        r = requests.get(url, headers={'cookie': coo}, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""

def parsePage(ilt, html):
    try:
        plt = re.findall(r'\"view_price\"\:\"[\d\.]*\"', html)
        tlt = re.findall(r'\"raw_title\":\".*?\"', html)
        for i in range(len(plt)):
            # eval() strips the outermost single or double quotes
            # from the matched string.
            price = eval(plt[i].split(':')[1])
            title = eval(tlt[i].split(':')[1])
            ilt.append([price, title])
    except:
        print("")

def printGoodsList(ilt):
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序號", "價格", "商品名稱"))
    count = 0
    for g in ilt:
        count = count + 1
        print(tplt.format(count, g[0], g[1]))

def main():
    goods = '書包'
    depth = 2  # number of result pages to crawl
    start_url = 'https://s.taobao.com/search?q=' + goods
    infoList = []
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44 * i)  # 44 items per result page
            html = getHTMLTest(url)
            parsePage(infoList, html)
        except:
            continue
    printGoodsList(infoList)

main()
Version 2: disguise the request as a browser. A User-Agent header is added, and the cookie string is parsed into a dict and passed through requests' cookies parameter. Only getHTMLTest() changes; parsePage(), printGoodsList() and main() are identical to version 1.

def getHTMLTest(url):
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36'}
    try:
        # coo is the same logged-in Taobao cookie string as in version 1.
        coo = '...'
        # Browser disguise: split the raw cookie string into name/value pairs.
        cookies = {}
        for line in coo.split(';'):
            name, value = line.strip().split('=', 1)
            cookies[name] = value
        r = requests.get(url, cookies=cookies, headers=headers, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""
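parsePage() works because the search page embeds its product data as JSON-style key/value pairs inside the HTML. A standalone demonstration on an invented fragment (the title and price below are made up):

import re

html = '"raw_title":"簡約雙肩包","view_price":"128.00"'  # invented sample fragment
plt = re.findall(r'\"view_price\"\:\"[\d\.]*\"', html)
print(plt[0])                       # "view_price":"128.00"
price = eval(plt[0].split(':')[1])  # eval() strips the outer double quotes
print(price)                        # 128.00 (as a str)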
Example 3: a focused crawler for stock data
Functional description
Goal: collect the names and trading information of all stocks on the Shanghai and Shenzhen exchanges
Output: saved to a file
Technical approach: requests + bs4 + re
Choosing a candidate data site
Sina Finance stocks: https://finance.sina.com.cn/stock/
Baidu Stocks: https://gupiao.baidu.com/stock
Selection principle: the stock information should exist statically in the HTML page (not generated by JavaScript) and carry no robots.txt restrictions; a quick check is sketched below.
Selection method: the browser's F12 developer tools, viewing the page source, and so on.
Selection mindset: do not get hung up on any single site; try several information sources.
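One quick way to apply the selection principle: fetch the candidate page with requests and check whether the data you want is already in the raw HTML; if it only shows up in the browser, it is generated by JavaScript and a requests-bs4 crawler cannot see it. A sketch (the probe string 'sh600000' is just an illustrative guess at what the page contains):

import requests

r = requests.get('http://quote.eastmoney.com/stock_list.html', timeout=30)
r.encoding = r.apparent_encoding
# True means stock codes are present in the static HTML itself.
print('sh600000' in r.text)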
Program structure
Step 1: get the stock list from East Money (東方財富網)
Step 2: using that list, fetch each stock's details from Baidu Stocks one by one
Step 3: store the results in a file
import requests
from bs4 import BeautifulSoup
import re
import traceback

# Fetch the page at url.
def getHTMLText(url, code='utf-8'):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # Encoding optimization: instead of computing r.apparent_encoding for
        # every page, determine the encoding by hand once (see the interactive
        # session at the end) and pass it in directly.
        # r.encoding = r.apparent_encoding
        r.encoding = code
        return r.text
    except:
        return ""

# Collect the list of stock codes.
def getStockList(lst, stockURL):
    html = getHTMLText(stockURL, 'GB2312')
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find_all('a')
    for i in a:
        try:
            href = i.attrs['href']
            # Stock codes look like sh600000 / sz000001: 'sh' or 'sz'
            # followed by six digits.
            lst.append(re.findall(r"[s][hz]\d{6}", href)[0])
        except:
            continue

# Fetch each individual stock's details, collect them in a dict,
# and append the dict to the output file.
def getStockInfo(lst, stockURL, fpath):
    count = 0
    for stock in lst:
        url = stockURL + stock + ".html"
        html = getHTMLText(url)
        try:
            if html == "":
                continue
            infoDict = {}
            soup = BeautifulSoup(html, 'html.parser')
            stockInfo = soup.find('div', attrs={'class': 'stock-bets'})
            name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名稱': name.text.split()[0]})
            keyList = stockInfo.find_all('dt')
            valueList = stockInfo.find_all('dd')
            for i in range(len(keyList)):
                key = keyList[i].text
                val = valueList[i].text
                infoDict[key] = val
            with open(fpath, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
            count = count + 1
            # Dynamic progress display: '\r' moves the cursor back to the start
            # of the line, so the next print overwrites the previous one and
            # the percentage updates in place without scrolling.
            print("\r當前進度:{:.2f}%".format(count * 100 / len(lst)), end=' ')
        except:
            count = count + 1
            print("\r當前進度:{:.2f}%".format(count * 100 / len(lst)), end=' ')
            traceback.print_exc()
            continue

def main():
    stock_list_url = 'http://quote.eastmoney.com/stock_list.html'
    stock_info_url = 'https://gupiao.baidu.com/stock/'
    output_file = 'D://BaiduStockInfo.txt'
    slist = []
    getStockList(slist, stock_list_url)
    getStockInfo(slist, stock_info_url, output_file)

main()
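The '\r' progress display is worth seeing in isolation. A self-contained demonstration (the loop and the delay are made up):

import time

for i in range(1, 101):
    # '\r' returns the cursor to the start of the line and end='' suppresses
    # the newline, so each print overwrites the previous percentage.
    print("\r當前進度:{:.2f}%".format(float(i)), end='')
    time.sleep(0.02)
print()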
The hard-coded encodings ('GB2312' for the East Money list page, 'utf-8' for Baidu Stocks) were determined once in an interactive session:

>>> r = requests.get('http://quote.eastmoney.com/stock_list.html', timeout=30)
>>> r.apparent_encoding
'GB2312'
>>> r = requests.get('https://gupiao.baidu.com/stock/', timeout=30)
>>> r.apparent_encoding
'utf-8'