This post is a step up from the previous one. It still downloads images to the local disk, but it is considerably more involved: the previous post only grabbed the images on the current page, while this one downloads the detail-page images of the entire site. That means crawling consecutive pages and creating a folder per item to hold each detail page's images, so the volume of scraped data grows by tens or even hundreds of times.
The target site is again 帥啊 (shuaia.net); we want to download every detail page's images from the whole site to the local disk.
```python
from bs4 import BeautifulSoup
import requests
import os
import urllib.request
import time

headers = {
    "Cookie": "gsScrollPos-1702684443=0; UM_distinctid=16685e0279d3e0-06f34603dfa898-36664c08-1fa400-16685e0279e133; bdshare_firstime=1539844405694; _d_id=6556c25e9ddd0e7e71096a1e343f6b; gsScrollPos-1702684407=; CNZZDATA1254092508=1744643453-1539842703-%7C1540364765",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
}
path = "D://images/"

def get_links(url):
    wb_data = requests.get(url, headers=headers)  # spoof headers so the site serves us
    if wb_data.status_code == 404:
        return
    wb_data.encoding = "utf-8"
    soup = BeautifulSoup(wb_data.text, 'lxml')
    if not os.path.exists(path):  # create the root folder if it does not exist yet
        os.mkdir(path)
    links = soup.select(".item-img")
    for link in links:
        download_img(link.get('href'))
        time.sleep(1)
    # next page; the last page has no ".next" link, so guard against an IndexError
    next_links = soup.select(".next")
    if not next_links:
        print("------ download done -------")
        return
    next_page = next_links[0].get("href")
    print("------ next page -------")
    get_links(next_page)

def download_img(url):
    wb_data = requests.get(url, headers=headers)
    wb_data.encoding = "utf-8"
    soup = BeautifulSoup(wb_data.text, 'lxml')
    images = soup.select(".wr-single-content-list img")
    catalog = soup.select("h1")[0].get_text()  # use the detail-page title as the folder name
    catalog = path + catalog + "/"
    if not os.path.exists(catalog):
        os.mkdir(catalog)
    for index, image in enumerate(images):  # enumerate is a Python built-in that yields both the index and the value
        print(index)
        img = image.get("src")
        urllib.request.urlretrieve(img, catalog + str(index) + ".jpg")
        print("-------- downloading ---------")

if __name__ == "__main__":
    get_links("http://www.shuaia.net/index.html")
```
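Also note that `urllib.request.urlretrieve` sends its own default headers, not the spoofed Cookie/User-Agent above, so a site that checks them may answer with a 403; and a detail-page title can contain characters that Windows forbids in folder names (`\ / : * ? " < > |`). Here is a hedged sketch of a replacement saver, assuming `requests` is acceptable for the image download as well (`safe_name` and `save_image` are illustrative helpers, not part of the original code):

```python
import re

def safe_name(title):
    # replace characters Windows forbids in file/folder names (illustrative pattern)
    return re.sub(r'[\\/:*?"<>|]', "_", title).strip()

def save_image(img_url, filepath):
    # unlike urllib.request.urlretrieve, requests sends our spoofed headers
    resp = requests.get(img_url, headers=headers, timeout=10)
    if resp.status_code == 200:
        with open(filepath, "wb") as f:
            f.write(resp.content)
```

`download_img` would then build the folder as `path + safe_name(catalog) + "/"` and call `save_image(img, catalog + str(index) + ".jpg")` in place of `urlretrieve`.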