# Crawler study (5) — scraping Baidu Tieba

import os
import time
import urllib.request
import urllib.parse


# 輸入目標頁碼和吧名
# Prompt for the target forum name and page range, then crawl each page.
def header():
    """Entry point: ask the user for a Tieba forum name ("吧名") and an
    inclusive page range, then fetch and save each page's HTML.

    Side effects: reads from stdin, prints progress, performs network I/O
    and writes files via download().
    """
    url = "https://tieba.baidu.com/f?"
    baming = input("請輸入要爬取的吧名")
    start_page = int(input("請輸入起始頁"))
    end_page = int(input("請輸入結束頁"))
    # Crawl every requested page (end_page is inclusive).
    for page in range(start_page, end_page + 1):
        print("正在爬取第%s頁" % page)
        request = headle_request(page, url, baming)
        download(request, baming, page)
        # Throttle requests so the site does not flag us as abusive.
        time.sleep(2)


# 構建請求對象,拼接url
# Build the request object by composing the query string onto the base URL.
def headle_request(page, url, baming):
    """Build a urllib Request for one result page of a Tieba forum.

    Args:
        page: 1-based page number to fetch.
        url: base search URL ending with '?' (query string is appended).
        baming: forum name, sent as the 'kw' query parameter.

    Returns:
        urllib.request.Request carrying a browser-like User-Agent header,
        so the site serves the normal desktop page.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}
    # Tieba paginates 50 posts per page; 'pn' is the zero-based post offset.
    pn = (page - 1) * 50
    query = {
        "kw": baming,
        "ie": "utf8",
        "pn": pn,
    }
    full_url = url + urllib.parse.urlencode(query)
    return urllib.request.Request(full_url, headers=headers)


# 根據請求對象下載指定的目標數據
# Fetch the prepared request and store the raw HTML on disk.
def download(request, baming, page):
    """Fetch *request* and save the response body under ./<baming>/.

    Args:
        request: prepared urllib.request.Request for one forum page.
        baming: forum name; also used as the output directory name.
        page: page number, embedded in the output file name.
    """
    # Ensure the per-forum output directory exists (no error if it already
    # does; avoids the exists()/mkdir() race of the naive check).
    os.makedirs(baming, exist_ok=True)
    # Compose the output file name, e.g. "<baming>第3頁.html".
    filename = baming + "第%s頁" % page + ".html"
    print(filename)
    filepath = os.path.join(baming, filename)
    # Use context managers so both the HTTP response and the file are
    # closed deterministically (the original leaked the response object).
    with urllib.request.urlopen(request) as response:
        with open(filepath, "wb") as tf:
            tf.write(response.read())
# Run the interactive crawler only when executed as a script.
if __name__ == '__main__':
    header()