http://www.allitebooks.org/
is the most generous site I have ever come across: every book on it is free to download.
Bored over the weekend, I tried scraping every PDF book on the site.
This is the simplest possible crawler, with little thought given to error handling. If you try it yourself, please be gentle; don't take this generous site down. A gentler fetch helper is sketched after the script.
# www.qingmiaokeji.cn
from bs4 import BeautifulSoup
import requests
import json

siteUrl = 'http://www.allitebooks.org/'

def category():
    # Collect every category link from the site's sub-menu.
    response = requests.get(siteUrl)
    categoryurl = []
    soup = BeautifulSoup(response.text, "html.parser")
    for a in soup.select('.sub-menu li a'):
        categoryurl.append({'name': a.get_text(), 'href': a.get("href")})
    return categoryurl

def bookUrlList(url):
    # Walk every listing page of one category.
    response = requests.get(url['href'])
    soup = BeautifulSoup(response.text, "html.parser")
    # The "Last Page" link's text is the number of the category's final page.
    a = soup.select(".pagination a[title='Last Page →']")
    nums = 1  # default to one page: categories without pagination have no such link
    for e in a:
        nums = int(e.get_text())
    for i in range(1, nums + 1):
        bookList(url['href'] + "page/" + str(i))

def bookList(url):
    # Visit every book linked from one listing page.
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    article = soup.select(".main-content-inner article .entry-title a")
    for i in article:
        bookUrl = i.get("href")
        getBookDetail(bookUrl)

def getBookDetail(url):
    # Scrape the title, cover image and PDF link, then append them
    # to the output file as one Markdown table row.
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    title = soup.select(".single-title")[0].text
    imgurl = soup.select(".entry-body-thumbnail .attachment-post-thumbnail")[0].get("src")
    downLoadPdfUrl = soup.select(".download-links a")[0].get("href")
    with open('d:/booklist.txt', 'a+', encoding='utf-8') as f:
        f.write(title + " | ![](" + imgurl + ") | " + downLoadPdfUrl + "\n")

if __name__ == '__main__':
    categories = category()
    for url in categories:
        bookUrlList(url)
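The script above fires requests as fast as it can and dies on the first network hiccup. A minimal sketch of a gentler fetch helper, assuming a fixed delay and a small retry budget are acceptable; the name politeGet, the delay, and the retry count are my own choices, not part of the original script:

import time
import requests

REQUEST_DELAY = 2  # seconds to wait before every request; assumed value, tune to taste
MAX_RETRIES = 3    # assumed retry budget

session = requests.Session()  # reuse one connection instead of reconnecting per request

def politeGet(url):
    # Fetch a URL with a fixed delay and a few retries; returns None if all attempts fail.
    for attempt in range(MAX_RETRIES):
        time.sleep(REQUEST_DELAY)        # be gentle: pause before every request
        try:
            response = session.get(url, timeout=10)
            response.raise_for_status()  # treat HTTP 4xx/5xx as failures too
            return response
        except requests.RequestException as e:
            print("retry", attempt + 1, "for", url, ":", e)
    return None

Swapping each requests.get(...) call in the script for politeGet(...), and skipping pages where it returns None, keeps one transient failure from killing a crawl that can run for hours.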
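The script also imports json but never uses it; my guess is that structured output was planned at some point. If you want output you can load back into a program, a sketch along these lines writes one JSON object per line instead of the pipe-delimited text (the saveBook name, the record layout, and the file name are my assumptions):

import json

def saveBook(title, imgurl, downLoadPdfUrl):
    # Append one book as a single JSON object per line (JSON Lines format).
    record = {'title': title, 'cover': imgurl, 'pdf': downLoadPdfUrl}
    with open('d:/booklist.jsonl', 'a+', encoding='utf-8') as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")

Calling saveBook(title, imgurl, downLoadPdfUrl) from getBookDetail in place of the f.write line keeps every field recoverable later with json.loads.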