Web Scraping Examples

1. Scrape the Maoyan Top 100 movies
2. Scrape all flower species from Magic Garden (魔法花園)
3. Scrape the Magic Garden magic house (flower synthesis)
4. Scrape Pixabay images
5. Scrape merchant info for a single area from Ele.me (餓了麼)
6. Scrape Taobao models (淘女郎)
7. Scrape FIND ICONS
8. Scrape Doutula (鬥圖網) images
9. To be continued...
 
1. Scrape the Maoyan Top 100 movies
from multiprocessing import Pool
import requests
from bs4 import BeautifulSoup
import re
import os
from random import choice
 
 
def get_page_url():
    pass  # placeholder, not used below
 
 
def parse_page_url(url):
    print('Downloading:',url)
    headers = [
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
        'Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0'
        ]
    header = {'user-agent': choice(headers)}
 
    try:
        r = requests.get(url,headers=header)
        if r.status_code == 200:
            print('Request succeeded')
            return r.text
        else:
            print('Status code is not 200, please check the code')
            return None
    except requests.exceptions.ConnectionError:
        print('Error occurred')
        return None
    except Exception as result:
        print('Caught an unexpected exception')
        print(result)
        return None
 
 
def get_text(text):
    soup = BeautifulSoup(text, 'lxml')
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>' + '.*?data-val="{movieId:(.*?)}" href='
        '.*?src="(.*?)"/>\n<img.*?name.*?>(.*?)</a></p>.*?class="star">(.*?)</p>' +
        '.*?"releasetime">(.*?)</p>.*?</dd>', re.S
        )
    
    items = re.findall(pattern, str(soup))
    for item in items:
        yield {
            'name': item[3].split('>')[1],
            'id': item[1],
            'actors': item[4].split(':')[1].split(' ')[0],
            'time': item[5].split(':')[1],
            #'img': 'http:' + item[2]
            }
 
 
def write_text(content):
    print('Saving: %s, please wait...' % content['name'])
    with open('c:/maoyan_movies.txt', 'a', encoding='utf-8') as f:
        f.write(str(content) + '\n')
        print('Saved successfully')
 
 
def main(Groups):
    urls = 'http://maoyan.com/board/4?offset=' + str(Groups)
    texts = parse_page_url(urls)
    for item in get_text(texts):
        write_text(item)
 
 
if __name__ == '__main__':
    pool = Pool()
    Groups = ([i*10 for i in range(10)])
    pool.map(main, Groups)
    pool.close()
    pool.join()
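The long regular expression in get_text is brittle if Maoyan tweaks its markup. As a gentler alternative, here is a minimal sketch that pulls similar fields with BeautifulSoup CSS selectors; the class names (board-index, name, star, releasetime) are taken from the regex above, but the live markup may differ, so verify it against a real page before relying on it.

from bs4 import BeautifulSoup

def parse_with_selectors(html):
    # Sketch: extract rank, title, star line and release time from one board page.
    # Class names are assumed from the regex used in get_text above.
    soup = BeautifulSoup(html, 'lxml')
    for dd in soup.select('dd'):
        yield {
            'index': dd.select_one('i.board-index').get_text(strip=True),
            'name': dd.select_one('p.name a').get_text(strip=True),
            'star': dd.select_one('p.star').get_text(strip=True),
            'time': dd.select_one('p.releasetime').get_text(strip=True),
        }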
 
 
2. Scrape all flower species from Magic Garden (魔法花園)
import requests
import re
from bs4 import BeautifulSoup
import time
import random
import xlwt
# These listing URLs are not used below; the loop at the bottom of the script builds its own URL.
url_half = 'http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&pageNo={}'
url_list = [url_half.format(i) for i in range(24)]
 
 
# print(url_list)
def get_html(url):
    # Fetch one listing page, then follow each flower's detail page and collect its attributes.
    res = requests.get(url, headers=headers)
    r_text = res.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
    pattern = r'<img src="(.*?)"/>\n<a href=\'(.*?)\'>(.*?)</a>'
 
    t = re.findall(pattern, r_text, re.S)
    flower_list = []
    for item in t:
        time.sleep(random.random())
        flower_dict = {}
        flower_img = 'http://h5.pinpinhu.com' + item[0]
        flower_id = item[1].rsplit(';', maxsplit=1)[1].rsplit('=', maxsplit=1)[1]
        flower_name = item[2]
        _s = item[1].split(';')
        valid_url = 'http://h5.pinpinhu.com' + _s[0] + _s[1].split('amp')[0] + _s[2].split('amp')[0] + _s[3]
        pattern = '(.*?)HOME_SESSION.*?jvm1(.*)'
        url_half = re.findall(pattern, valid_url, re.S)[0]
        flower_url = url_half[0] + url_half[1]
        flower_dict['flower_name'] = flower_name
        flower_dict['flower_id'] = flower_id
        flower_dict['flower_url'] = flower_url
 
        res_2 = requests.get(flower_url, headers=headers)
        a_2 = res_2.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
        li_2 = re.findall(r'花之圖譜(.*?)返回花之圖譜', a_2, re.S)[0]
        pattern_level = r'.*花種等級:(.*?)<br/>.*'
        pattern_price = r'.*種子價格:(.*?)家幣<br/>.*'
        pattern_max = r'.*(?:預計成花|成花數量):(.*?)<br/>.*保底數量.*'
        pattern_min = r'.*保底數量:(.*?)<br/>.*'
 
        pattern_time = r'.*預計時間:(.*?)<br/>.*'
        pattern_word = r'.*(?:鮮花花語|鮮花話語):(.*?)<br/>.*'
        try:
            s_level = re.findall(pattern_level, li_2, re.S)[0]
        except:
            s_level = ''
        try:
            s_price = re.findall(pattern_price, li_2, re.S)[0]
        except:
            s_price = ''
        try:
            s_max = re.findall(pattern_max, li_2, re.S)[0]
        except:
            s_max = re.findall(pattern_max, li_2, re.S)
        try:
            s_min = re.findall(pattern_min, li_2, re.S)[0]
        except:
            s_min = re.findall(pattern_min, li_2, re.S)
        try:
            s_time = re.findall(pattern_time, li_2, re.S)[0]
        except:
            s_time = re.findall(pattern_time, li_2, re.S)
        try:
            s_word = re.findall(pattern_word, li_2, re.S)[0]
        except:
            s_word = re.findall(pattern_word, li_2, re.S)
        if s_price:
            pattern_other = '商城購買'
        else:
            pattern_other = r'.*(?:鮮花花語|鮮花話語):.*?<br/>(.*?)<br/>'
        try:
            s_other = re.findall(pattern_other, li_2, re.S)[0]
        except:
            s_other = re.findall(pattern_other, li_2, re.S)
        
        flower_dict['level'] = s_level
        flower_dict['price'] = s_price
        flower_dict['max'] = s_max
        flower_dict['min'] = s_min
        flower_dict['time'] = s_time
        flower_dict['word'] = s_word
        flower_dict['other'] = s_other
        flower_dict['flower_img'] = flower_img
        flower_list.append(flower_dict)
    return flower_list
 
 
headers = {
'user-agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
 
}
flower_list = []
for i in range(1,24):
    # Normal:  http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&pageNo=23
    # Unique:  http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&type=1&pageNo=6
    # Rare:    http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&type=2&pageNo=6
    # Supreme: http://h5.pinpinhu.com/gd-graph/index.action?uid=3640469&sid=7ddb5d624f398d043b74d1fa21af3c8f4ec83ad1&type=3&pageNo=7
 
    url = 'http://h5.pinpinhu.com/gd-graph/index.action?uid=3981003&sid=3dd8cff09524f7af218b2af16ef6d26a5b69c6f2&type=0&pageNo={}'.format(i)
    print('------------ Page {} of 23 ------------'.format(i))
    a = get_html(url)
    flower_list += a
    time.sleep(random.randint(1,5))
 
print(flower_list)
time.sleep(1)
workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheet = workbook.add_sheet('普通花朵', cell_overwrite_ok=True)
x = 1
for item in flower_list:
        time.sleep(0.1)
 
        sheet.write(x, 0, item['flower_name'])
        sheet.write(x, 1, item['flower_id'])
        sheet.write(x, 2, item['flower_url'])
        sheet.write(x, 3, item['level'])
        sheet.write(x, 4, item['price'])
        sheet.write(x, 5, item['max'])
        sheet.write(x, 6, item['min'])
        sheet.write(x, 7, item['time'])
        sheet.write(x, 8, item['word'])
        sheet.write(x, 9, item['other'])
        sheet.write(x, 10, item['flower_img'])
        x += 1
 
workbook.save(r'普通.xls')
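The loop above starts writing at row 1, so row 0 of the sheet stays empty. If a header row is wanted, a minimal sketch that could sit just before workbook.save (the labels simply reuse the dict keys; adjust as needed) is:

# Hedged sketch: write a header row into row 0 of the same sheet before saving.
header = ['flower_name', 'flower_id', 'flower_url', 'level', 'price', 'max', 'min', 'time', 'word', 'other', 'flower_img']
for col, label in enumerate(header):
    sheet.write(0, col, label)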
 
 
3. Scrape the Magic Garden magic house (flower synthesis)
import requests
import re
import time
import random
 
 
def get_html(url):
    # Fetch the synthesis ("magic house") listing, then each flower's detail page for its recipe.
    headers = {
    'user-agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    }
    res = requests.get(url=url, headers=headers)
    s = res.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
    pattern = r'.*極品(.*?)返回花園首頁.*'
    a = re.findall(pattern, s, re.S)[0]
    pattern = r'.*?<img src="(.*?)"/><a href=\'(.*?)\'>(.*?)</a>.*?'
    b = re.findall(pattern, a, re.S)
    flower_list = []
    for item in b:
        time.sleep(random.random())
        flower_dict = {}
        flower_img = 'http://h5.pinpinhu.com' + item[0]
        flower_id = item[1].rsplit('id=', maxsplit=1)[1].split('&')[0]
        flower_name = item[2]
        _s = item[1].split(';')
        valid_url = 'http://h5.pinpinhu.com' + _s[0] + _s[1].split('amp')[0] + _s[2].split('amp')[0] + _s[3]
        pattern = '(.*?)HOME_SESSION.*?jvm1(.*)'
        url_half = re.findall(pattern, valid_url, re.S)[0]
        flower_url = url_half[0] + url_half[1]
 
        res_2 = requests.get(flower_url, headers=headers)
        a_2 = res_2.text.replace(u'\u266d', u'').replace(u'\xa9', u'')
        li_2 = re.findall(r'魔法屋(.*?)返回花園首頁', a_2, re.S)[0]
        pattern_level = r'.*種子等級:(.*?)<br/>.*'
        pattern_compound = r'.*合成需消耗:(.*?)所需花朵不足.*'
        level = re.findall(pattern_level, li_2, re.S)[0]
        compound = re.findall(pattern_compound, li_2, re.S)[0]
        flower_dict['name'] = flower_name
        flower_dict['level'] = level
        flower_dict['flower_id'] = flower_id
 
        _a = compound.replace(r'<br/>', '').replace('\n', '')
        li_half = [i.rsplit('(', maxsplit=1) for i in _a.replace('/0)', '|').split('|')]
        detail_list = [i for i in li_half if not i==['']]
        flower_dict['detail_list'] = detail_list
        flower_dict['flower_img'] = flower_img
        flower_dict['flower_url'] = flower_url
        flower_list.append(flower_dict)
    return flower_list
 
flower_list = []
for i in range(1,2):
    url = 'http://h5.pinpinhu.com/gd-synthesis/index.action?uid=3981003&sid=0a05853e3cc8ea83eeb896806280c894be5aa59b&type=0&subType=0'
    print('---------%s-------'%i)
    a = get_html(url)
    flower_list += a
    time.sleep(3)
 
import xlwt
time.sleep(1)
workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheet = workbook.add_sheet('普通花朵合成', cell_overwrite_ok=True)
x = 1
for item in flower_list:
    time.sleep(0.1)
    sheet.write(x, 0, item['name'])
    sheet.write(x, 1, int(item['flower_id']))
    sheet.write(x, 2, str(item['level']))
    sheet.write(x, 3, str(item['detail_list']))
    sheet.write(x, 4, item['flower_img'])
    sheet.write(x, 5, item['flower_url'])
    x += 1
 
workbook.save(r'1.xls')
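For reference, here is a small, self-contained illustration of what the detail_list parsing in get_html produces. The sample compound string is made up (hypothetical), but it follows the 名稱(數量/0) shape the code expects:

# Hypothetical sample of the "合成需消耗" fragment after tags and newlines are stripped.
compound = '紅玫瑰(3/0)白百合(2/0)'
_a = compound.replace('<br/>', '').replace('\n', '')
li_half = [i.rsplit('(', maxsplit=1) for i in _a.replace('/0)', '|').split('|')]
detail_list = [i for i in li_half if not i == ['']]
print(detail_list)  # [['紅玫瑰', '3'], ['白百合', '2']]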
 
 
4. Scrape Pixabay images
# Pixabay image downloader
# Source site: https://pixabay.com/
import requests
from bs4 import BeautifulSoup
import os
root = 'F:\\圖片\\'  # trailing separator so file names are joined onto the directory correctly
url_s = []
pixabay_day = []
base_url = 'https://pixabay.com/zh/photos/?order=ec&pagi='
for i in range(1,2):
    urls = base_url + str(i)
    url_s.append(urls)
for urls in url_s:
    request = requests.get(urls)
    content = request.content
    soup = BeautifulSoup(content,'lxml')
    img_list = soup('img')
    for img in img_list:
        url = img['src']
        url = url.split('/static')[0]  # keep only the part of the src before any '/static' segment
        pixabay_day.append(url)
        path = root + url.split('/')[-1]
        try:
            if not os.path.exists(root):
                os.mkdir(root)
            if not os.path.exists(path):
                r = requests.get(url)
                with open(path,'wb') as f:
                    f.write(r.content)
                print('File saved')
            else:
                print('File already exists')
        except:
            print('Scraping failed')
 
5. Scrape merchant info for a single area from Ele.me (餓了麼)
import urllib.request
import os
import time
import json
from  openpyxl  import Workbook
from  openpyxl  import load_workbook
 
keywordExcel="keyword.xlsx"  #關鍵字檢索外賣地點保存路徑
keywords=["鄭州","商丘"]  #關鍵字集合
targetDir ="test"  #文件保存路徑
 
 
def reqsettingPoi():  # build the request headers for the POI keyword search; the url only holds the base path for now
    weburl = "https://mainsite-restapi.ele.me/v2/pois?"
    #extra1="extras%5B%5D=count&geohash=wx4g0bmjetr7&keyword=%E6%9C%9D%E9%98%B3&limit=20&type=nearby"
    webheaders={
    "Accept":"application/json, text/plain, */*",
    "Accept-Language":"zh-CN,zh;q=0.8",
    "Connection":"keep-alive",
    "Cookie":"ubt_ssid=plds7ye19rj2rghg3oaar8hkt89yy7f1_2017-02-07; _utrace=ac9073c509bedb74b28a1482bd95a9d8_2017-02-07",
    "Host":"mainsite-restapi.ele.me",
    "Origin":"https://www.ele.me",
    "Referer":"https://www.ele.me/place/wx4g4h5shqf",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36"
        }
    req=urllib.request.Request(url=weburl,headers=webheaders)
    return req
 
 
def write2ExcelPoi(jsondata,title):  # write the POI results for each keyword into its own sheet
    fileName=keywordExcel
    if(os.path.exists(fileName)):
        wb=load_workbook(fileName)
    else:
        wb=Workbook()
    ws=wb.create_sheet(title)    
    ws.column_dimensions["A"].width =10.0
    ws.append(["ID","城市","geohash","名稱","地址","商家總數","經度","緯度","request_id","short_address"])
    ws.column_dimensions["A"].width =30.0
    ws.column_dimensions["B"].width =10.0
    ws.column_dimensions["C"].width =18.0
    ws.column_dimensions["D"].width =20.0
    ws.column_dimensions["E"].width =50.0
    ws.column_dimensions["F"].width =10.0
    ws.column_dimensions["G"].width =10.0
    ws.column_dimensions["H"].width =10.0
    ws.column_dimensions["I"].width =25.0
    ws.column_dimensions["J"].width =40.0
    for i in range(len(jsondata)):
        row=jsondata[i]
        ws.append([row["id"],row["city"],row["geohash"],row["name"],row["address"],row["count"],
                   row["longitude"],row["latitude"],row["request_id"],row["short_address"]])
    wb.save(fileName)
    
 
def excelName():  # build the output file name from today's date
    if not os.path.isdir(targetDir):  
        os.mkdir(targetDir)
    excelName=str(time.strftime ("%Y-%m-%d")+".xlsx")
    completePath=targetDir+"\\"+excelName
    return completePath
 
 
def reqsetting():  # build the restaurant-list request; the extra query params and the Referer header are replaced per request
    weburl = "https://mainsite-restapi.ele.me/shopping/restaurants?"
    extra1="extras%5B%5D=activities&geohash=wx4g56v1d2m&latitude=39.91771&limit=24&longitude=116.51698&offset=0&terminal=web"
    webheaders={
    "Accept":"application/json, text/plain, */*",
    "Accept-Language":"zh-CN,zh;q=0.8",
    "Connection":"keep-alive",
    "Cookie":"ubt_ssid=plds7ye19rj2rghg3oaar8hkt89yy7f1_2017-02-07; _utrace=ac9073c509bedb74b28a1482bd95a9d8_2017-02-07",
    "Host":"mainsite-restapi.ele.me",
    "Origin":"https://www.ele.me",
    #"Referer":"https://www.ele.me/place/wx4g56v1d2m",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36"
        }
    req=urllib.request.Request(url=weburl,headers=webheaders)
    return req
 
 
def write2Excel(jsondata,title):  # write the restaurants for each location into the Excel file, one sheet per location
    fileName=excelName()
    isexit="false"
    if(os.path.exists(fileName)):
        wb=load_workbook(fileName)
        isexit="true"
    else :
       wb=Workbook()
    if(wb.__contains__(title)):
        ws=wb[title]
        ws.append([])
    else:
        ws=wb.create_sheet(title)
        ws.column_dimensions["A"].width =10.0
        ws.column_dimensions["B"].width =40.0
        ws.column_dimensions["C"].width =60.0
        ws.column_dimensions["D"].width =10.0
        ws.column_dimensions["E"].width =18.0
        ws.column_dimensions["F"].width =10.0
        ws.append(["ID","店名","地址","距離","人均消費","月銷售額"])
    
    for i  in range(len(jsondata)):
        row=jsondata[i]
        #print(type(row))
        if("average_cost" not  in row.keys()):
            row["average_cost"]="無人均消費數據"
        ws.append([row["id"],row["name"],row["address"],row["distance"],row["average_cost"],row["recent_order_num"]])
    wb.save(fileName)
    
 
def readKeyWordFromExcel():  # load the delivery locations from the keywordExcel generated above
    fileName=keywordExcel
    if(os.path.exists(fileName)):
        wb=load_workbook(fileName)
    else:
        return
    for title in wb.sheetnames:
        ws=wb[title]
        for i in range(2,ws.max_row+1):  # data rows start at row 2 (row 1 is the header)
            infos={}  # location info, used to build the request parameters
            infos["name"]=ws.cell(row=i,column=4).value
            print("Scraping delivery merchants near %s ..." % infos["name"])
            infos["ID"]=ws.cell(row=i,column=1).value
            infos["geohash"]=ws.cell(row=i,column=3).value
            infos["longitude"]=ws.cell(row=i,column=7).value
            infos["latitude"]=ws.cell(row=i,column=8).value
            if(infos["geohash"]):
                req=reqsetting()
                req.add_header("Refer","https://www.ele.me/place/%s" % infos["geohash"])#修改請求頭的refer
                newUrl=req.get_full_url()
                offset=0
                contentBytes=""
                while(contentBytes!="[]"):#構造請求參數,基本上只修改offset 偏移量數據
                    params={
                    "extras[]":"activities",
                    "geohash":"%s" % infos["geohash"],
                    "latitude":"%s" % infos["latitude"],
                    "longitude":"%s" % infos["longitude"],
                    "terminal":"web",
                    "limit":24,
                    "offset":offset
                       }
                    params=urllib.parse.urlencode(params)  # encode the request parameters
                    req.full_url=newUrl+params   # rebuild the full request URL
                    webpage=urllib.request.urlopen(req)
                    contentBytes = webpage.read().decode("utf-8")
                    if(contentBytes!="[]"):
                        jsondata=json.loads(contentBytes)        
                        write2Excel(jsondata,infos["name"])#將請求數據寫入excel中
                        offset+=24 #便宜
                    else :
                        break
 
 
if __name__ == '__main__':  # program entry point
    if(os.path.exists(keywordExcel)):
        os.remove(keywordExcel)
    req=reqsettingPoi()
    newUrl=req.get_full_url()
    for keyword in keywords:  # build different query params for each keyword and append them to the URL
        params={
        "extras[]":"count",
        "geohash":"wx4g0bmjetr7",
        "keyword":"%s" % keyword,
        "limit":"20",
        "type":"nearby"
            }
        params=urllib.parse.urlencode(params)  # encode the request parameters
        req.full_url=newUrl+params  # rebuild the request URL
       
        webpage=urllib.request.urlopen(req)  # fetch the data
        contentBytes = webpage.read().decode("utf-8")
        jsondata=json.loads(contentBytes)  # parse the response as JSON
        write2ExcelPoi(jsondata,keyword)  # write the data into the Excel file
    time.sleep(10)
    readKeyWordFromExcel()
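Both request builders rely on urllib.parse.urlencode to turn the params dict into a query string. As a quick sanity check, this is what the keyword parameters above encode to (output shown as a comment):

from urllib.parse import urlencode

params = {"extras[]": "count", "geohash": "wx4g0bmjetr7", "keyword": "鄭州", "limit": "20", "type": "nearby"}
print(urlencode(params))
# extras%5B%5D=count&geohash=wx4g0bmjetr7&keyword=%E9%84%AD%E5%B7%9E&limit=20&type=nearby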
 
 
6. Scrape Taobao models (淘女郎)
#!-*- coding:utf-8 -*-
 
import urllib.request
import re
def getUrlList(url):
    html = urllib.request.urlopen(url)
    response = str(html.read())
    return response
def getImageList(html):
    pattern = re.compile('.*?"avatarUrl":"(.*?)","cardUrl":"(.*?)","city":"(.*?)","height":"(.*?)",.*?"realName":"(.*?)",.*?"totalFavorNum":(.*?),"userId":(.*?),.*?"weight":"(.*?)".*?',re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'avatarUrl': 'http:' + item[0],
            'cardUrl': 'http:' + str(item[1]),
            'city': item[2],
            'height': item[3],
            'realName': item[4],
            'totalFavorNum': item[5],
            'userId': item[6],
            'weight': item[7],
        }
 
def main():
    url = 'https://mm.taobao.com/tstar/search/tstar_model.do?_input_charset=utf-8'
    html = getUrlList(url)
    for item in getImageList(html):
        print(item)
 
if __name__ == '__main__':
    main()
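The tstar_model.do endpoint returns JSON, so regex-matching the raw text is fragile. Below is a minimal alternative sketch that decodes the response with the json module; the nesting assumed here (data -> searchDOList) is a guess, not confirmed by the original script, so check it against a real response first.

import json
import urllib.request

def get_models(url):
    # Sketch: decode the response as JSON instead of regex-matching the raw text.
    with urllib.request.urlopen(url) as resp:
        data = json.loads(resp.read().decode('utf-8', errors='replace'))
    # 'data' and 'searchDOList' are assumed key names (hypothetical).
    for record in data.get('data', {}).get('searchDOList', []):
        yield {
            'avatarUrl': 'http:' + record.get('avatarUrl', ''),
            'realName': record.get('realName'),
            'city': record.get('city'),
        }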
 
7. Scrape FIND ICONS
# FIND ICONS icon packs
# Source site: https://findicons.com/
 
import requests
import os
from bs4 import BeautifulSoup
 
root = 'D:/圖片/'
base_urls = 'https://findicons.com/pack/2787/beautiful_flat_icons/'
for i in range(1,8):
    base_url = base_urls + str(i)
    r = requests.get(base_url)
    content = r.content
    soup = BeautifulSoup(content,'lxml')
    img_list = soup('img')
    for img in img_list:
        url = img['src']
        path = root + url.split('/')[-1]
        try:
            if not os.path.exists(root):
                os.mkdir(root)
            if not os.path.exists(path):
                r = requests.get(url)
                with open(path,'wb') as f:
                    f.write(r.content)
                print('File saved')
            else:
                print('File already exists')
        except:
            print('Scraping failed')
 
 
8. Scrape Doutula (鬥圖網) images
# Doutula image downloader
 
from tkinter import *
import os
import requests
from bs4 import BeautifulSoup
 
def do():
    urls = 'https://www.doutula.com/photo/list/?page=3'
    root = 'E:/圖片/'
    request = requests.get(urls)
    content = request.content
    soup = BeautifulSoup(content,'lxml')
    img_list = soup.find_all('img',attrs = {'class':'img-responsive lazy image_dta'})
    for img in img_list:
        url_a = 'http:' + img['data-original']
        url = url_a.split('!')[0]
        path = root + url.split('/')[-1]
        try:
            if not os.path.exists(root):
                os.mkdir(root)
            if not os.path.exists(path):
                r = requests.get(url)
                with open(path,'wb') as f:
                    f.write(r.content)
                print('File saved')
            else:
                print('File already exists')
        except:
            print('Scraping failed')
app = Tk()
Button(text = 'click',command = do).pack()
 
app.mainloop()
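Sections 4, 7 and 8 all repeat the same download-and-save loop. As a small refactoring sketch (the helper name and layout are my own, not from the original scripts), the shared part could live in one function:

import os
import requests

def save_image(url, root):
    # Download one image into root/<last path segment>, skipping files that already exist.
    path = os.path.join(root, url.split('/')[-1])
    os.makedirs(root, exist_ok=True)
    if os.path.exists(path):
        print('File already exists')
        return path
    r = requests.get(url)
    r.raise_for_status()
    with open(path, 'wb') as f:
        f.write(r.content)
    print('File saved')
    return path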
 
9. To be continued...