Scraping Douban Books

The script did not produce the expected results when run on Linux.
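Before digging into the full script, a minimal, self-contained sketch like the one below can confirm that a single tag page is reachable and parseable at all. It is not part of the original script: it assumes requests and beautifulsoup4 are installed, and the 'mod book-list' / 'dd' selectors are simply copied from the script, so they may no longer match if Douban has changed its markup or requires a login.

# Sanity-check sketch (assumption: requests + bs4 available, selectors taken from the script below)
import urllib.parse
import requests
from bs4 import BeautifulSoup

def fetch_one_page(tag, start=0):
    # build the same tag URL as the spider and fetch it with a browser-like User-Agent
    url = 'http://www.douban.com/tag/' + urllib.parse.quote(tag) + '/book?start=' + str(start)
    resp = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, 'html.parser')   # resp.text is already decoded to str
    list_div = soup.find('div', {'class': 'mod book-list'})
    return [] if list_div is None else list_div.findAll('dd')

print(len(fetch_one_page('文化')))   # roughly 15 entries per page when the request is not blocked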

#-*- coding:UTF-8 -*-

import sys
import time
import urllib.request
import urllib.error
import urllib.parse
import importlib
import requests   # only needed by the commented-out "previous version" below
import numpy as np
from bs4 import BeautifulSoup
from openpyxl import Workbook

# Python 2 leftover (reload(sys) before setdefaultencoding); unnecessary but harmless in Python 3
importlib.reload(sys)

#Some User Agents
hds=[{'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},\
{'User-Agent':'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},\
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'}]

# function: fetch all book information for the tag book_tag
# book_tag: a Douban book tag, e.g. '我的管理'
def book_spider(book_tag):
    # current page index, starting from 0
    page_num = 0
    # collected book records
    book_list = []
    # consecutive failed attempts
    try_times = 0

    while True:
        # url='http://www.douban.com/tag/%E5%B0%8F%E8%AF%B4/book?start=0' # For Test
        url = 'http://www.douban.com/tag/'+urllib.parse.quote(book_tag)+'/book?start='+str(page_num*15)
        print("url is {}".format(url))
        # random sleep of up to 5 seconds to avoid being throttled
        time.sleep(np.random.rand()*5)

        # current version: urllib with rotating User-Agent headers
        try:
            req = urllib.request.Request(url, headers=hds[page_num % len(hds)])

            # raw page bytes
            source_code = urllib.request.urlopen(req).read()

            # decode the bytes to a string (str() would only produce the repr of the bytes object)
            plain_text = source_code.decode('utf-8')

        except (urllib.error.HTTPError, urllib.error.URLError) as e:
            print(e)
            continue


        ## previous version using requests; the IP gets banned easily
        #source_code = requests.get(url)
        #plain_text = source_code.text

        soup = BeautifulSoup(plain_text, 'html.parser')

        list_soup = soup.find('div', {'class':'mod book-list'})


        try_times += 1
        if list_soup is None and try_times < 200:
            continue
        elif list_soup is None or len(list_soup) <= 1:
            break  # give up after 200 consecutive empty responses, or stop at the last page

        for book_info in list_soup.findAll('dd'):
            title = book_info.find('a',{'class':'title'}).string.strip()
            desc = book_info.find('div',{'class':'desc'}).string.strip()
            desc_list = desc.split('/')
            book_url = book_info.find('a',{'class':'title'}).get('href')


            try:
                author_info = '作者/譯者: ' + '/'.join(desc_list[0:-3])
            except:
                author_info = '作者/譯者: 暫無'
            try:
                pub_info = '出版信息: ' + '/'.join(desc_list[-3:])
            except:
                pub_info = '出版信息: 暫無'
            try:
                rating = book_info.find('span',{'class':'rating_nums'}).string.strip()
            except:
                rating = '0.0'
            try:
                #people_num = book_info.findAll('span')[2].string.strip()  # count shown on the list page (less reliable)
                people_num = get_people_num(book_url)    # count from the book's detail page
                people_num = people_num.strip('人評價')   # drop the "people rated" suffix
            except:
                people_num = '0'

            book_list.append([title,rating,people_num,author_info,pub_info])
            try_times = 0  # reset the failure counter once valid information is obtained
        page_num+=1
        print("Downloading Information From Page {}".format(page_num))
        print("如今的長度是",len(book_list))
    return book_list

# function: fetch the number of people who rated a book from its detail page
def get_people_num(url):
    # url='http://book.douban.com/subject/6082808/?from=tag_all' # For Test
    try:
        req = urllib.request.Request(url, headers=hds[np.random.randint(0, len(hds))])
        source_code = urllib.request.urlopen(req).read()
        plain_text = source_code.decode('utf-8')
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        print(e)
        return '0'   # no page, no count
    soup = BeautifulSoup(plain_text, 'html.parser')
    people_num = soup.find('div',{'class':'rating_sum'}).findAll('span')[1].string.strip()
    return people_num

# function: fetch the book lists for every tag in book_tag_lists
# book_tag_lists: e.g. ['我的管理', '時間管理', '投資', '文化', '宗教']
def do_spider(book_tag_lists):
    # one book list per tag
    book_lists = []
    # iterate over the tags
    for book_tag in book_tag_lists:
        # fetch all books for this tag
        book_list = book_spider(book_tag)
        # sort by the second field (rating), highest first; compare numerically, not as strings
        book_list = sorted(book_list, key=lambda x: float(x[1]), reverse=True)
        # collect the sorted list
        book_lists.append(book_list)
    return book_lists   # return the list of lists, not just the last tag's list

# function: write the book information into an Excel workbook
def print_book_lists_excel(book_lists, book_tag_lists):
    wb = Workbook()
    ws = []
    for i in range(len(book_tag_lists)):
        ws.append(wb.create_sheet(title=book_tag_lists[i]))  # one sheet per tag
    for i in range(len(book_tag_lists)):
        # row counter for this sheet
        count = 1
        # header row, written once per sheet
        ws[i].append(['序號','書名','評分','評價人數','作者','出版社'])
        for b1 in book_lists[i]:
            ws[i].append([count, b1[0], float(b1[1]), int(b1[2]), b1[3], b1[4]])
            count += 1
    save_path = 'book_list'
    for i in range(len(book_tag_lists)):
        save_path += ('-' + book_tag_lists[i])
    save_path += '.xlsx'
    print("Saving to: " + save_path)
    wb.save(save_path)
# function: main entry point
def main():
    # book_tag_lists = ['心理','判斷與決策','算法','數據結構','經濟','歷史']
    # book_tag_lists = ['傳記','哲學','編程','創業','理財','社會學','佛教']
    # book_tag_lists = ['思想','科技','科學','web','股票','愛情','兩性']
    # book_tag_lists = ['計算機','機器學習','linux','android','數據庫','互聯網']
    # book_tag_lists = ['數學']
    # book_tag_lists = ['攝影','設計','音樂','旅行','教育','成長','情感','育兒','健康','養生']
    # book_tag_lists = ['商業','理財','管理']
    # book_tag_lists = ['名著']
    # book_tag_lists = ['科普','經典','生活','心靈','文學']
    # book_tag_lists = ['科幻','思惟','金融']
    # book_tag_lists = ['我的管理', '時間管理', '投資', '文化', '宗教']
    book_tag_lists = ['文化', ]
    book_lists = do_spider(book_tag_lists)
    print_book_lists_excel(book_lists, book_tag_lists)

if __name__ == '__main__':
    main()
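Once the run finishes, the workbook can be checked without opening Excel. The short sketch below is not part of the spider; it assumes the default tag list ['文化'] above, so the output file is book_list-文化.xlsx, and simply reads that sheet back with openpyxl:

# verification sketch (assumes the run used book_tag_lists = ['文化'])
from openpyxl import load_workbook

wb = load_workbook('book_list-文化.xlsx')
ws = wb['文化']                      # each sheet is named after its tag
for row in ws.iter_rows(values_only=True):
    print(row)                       # (序號, 書名, 評分, 評價人數, 作者, 出版社)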