python_爬蟲_微信公衆號抓取

目前卡在視頻部分,公衆號的視頻來源是騰訊視頻,播放和下載地址都是加密的,目前vid及vkey均已經獲取,但使用爬蟲獲得的結果永遠是403、405,還沒有解決。

考慮方法:

   selenium當頁面加載後查看廣告用時,等廣告加載時間過去後再點擊視頻,再由網頁中獲取加載的視頻地址進行下載,明天測試看有無效果

import requests,pymysql
import json,jsonpath,random,re,time,datetime,os,imghdr
from lxml import etree
from selenium import webdriver
from urllib import request
import ssl
ssl._create_default_https_context = ssl._create_unverified_context

'''
注意點:若是同一時間內刷新次數,或者獲取分頁太頻繁,會被封
'''
# --------------------
user_info = {'username':'####@163.com','password':'####'}
base_url = 'https://mp.weixin.qq.com/'
base_headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
query_list = [ # 須要抓取的公衆號列表
    {'fakeid':'MzIzOTQ0MTUwMA==','nickname':'Sir電影'},
    {'fakeid':'MzIxODc5MzM4NQ==','nickname':'魚Sir電影'},
]
table = 'p_weixin' # 數據庫名稱
key = 'title,author,js_name,publish_time,images,vedios'

# --------------------

def get_cookie(): # log in through Selenium and return the session cookies
    """Open the MP login page, fill in the credentials, wait for the manual
    QR-code scan, then return the browser cookies as a name->value dict."""
    driver = webdriver.Chrome(executable_path=r'/Applications/Google Chrome.app/chromedriver')
    driver.get(base_url)
    time.sleep(2)  # let the page finish loading

    account_box = driver.find_element_by_name('account')
    account_box.clear()
    account_box.send_keys(user_info['username'])

    password_box = driver.find_element_by_name('password')
    password_box.clear()
    password_box.send_keys(user_info['password'])

    driver.find_element_by_class_name('icon_checkbox').click()
    driver.find_element_by_class_name('btn_login').click()
    time.sleep(25)  # give the operator time to scan the QR code on a phone

    # Flatten Selenium's list of cookie dicts into a simple mapping.
    return {item['name']: item['value'] for item in driver.get_cookies()}

    
def get_info(): # yield account name, page counts, token, fakeid-driven article links
    """Generator: log in, then for every account in ``query_list`` walk its
    article list 5 entries at a time, yielding ``[title, link]`` per article.

    A random 30-40s pause between pages avoids getting the session banned.
    """
    cookies = get_cookie()

    # The MP home page redirects to a URL that carries the session token.
    res_token = requests.get(base_url, cookies=cookies, headers=base_headers)
    match = re.search(r'token=(\d+)', str(res_token.url))
    # Fix: re.findall returned a *list* which was then sent verbatim as the
    # 'token' request parameter; use the captured string itself.
    token = match.group(1) if match else ''

    for query in query_list: # controls how many official accounts are crawled
        url = 'https://mp.weixin.qq.com/cgi-bin/appmsg' # article-list endpoint

        fakeid = query['fakeid']
        appmsg = {
            'token': token,
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': '1',
            'random': random.random(),
            'action': 'list_ex',
            'begin': '0',
            'count': '5',
            'fakeid': fakeid,
            'type': '9',
        }
        # First request just to learn the total article count; paging is then
        # driven through the 'begin' offset, 5 articles per page.
        res_cnt = requests.get(url, params=appmsg, cookies=cookies)
        res_cnt = json.loads(res_cnt.text)

        for offset in range(0, res_cnt['app_msg_cnt'], 5): # loop every page
            appmsg['begin'] = offset # current page offset

            response = requests.get(url, params=appmsg, cookies=cookies)
            data_list = json.loads(response.text)
            for data in data_list['app_msg_list']: # extract info on this page
                yield [data['title'], data['link']]
            time.sleep(random.randrange(30, 41)) # throttle to avoid a ban

        print('公衆號:%s,共有文章%s' % (query['nickname'], res_cnt['app_msg_cnt']))
            
def get_news(url=None):
    """Fetch one article page and extract its text, images, audio and video.

    Args:
        url: article link (as yielded by ``get_info``). Defaults to a
             hard-coded sample article so the function still runs stand-alone;
             accepting the parameter also fixes ``main()``, which already
             passes ``info[-1]`` (the link).
    """
    print('-'*40)
    if url is None:  # preserve the old stand-alone debugging behaviour
        url = 'https://mp.weixin.qq.com/s?src=3&timestamp=1533397256&ver=1&signature=RbnX4tUBODpql9qsvp4jJRDrtHc-LSXXm9gSM*BNY*PTRKHJ2bUyeKkGPlpKGGsnKl4IyaxubTPPWv6jQzhm52M7qFY5*BJ8dEugb4XPUcLRSs8U-4Bb9ab9mso2NWDq0*RwRzZ2*zZ6r1YyQtNjpg=='
    req = request.Request(url, headers=base_headers)
    response = request.urlopen(req)

    re_data = response.read().decode()
    data = etree.HTML(re_data)
    title = get_try(data, '//h2[@id="activity-name"]/text()')  # article title
    author = get_try(data, '//div[@id="meta_content"]//span[@class="rich_media_meta rich_media_meta_text"]//text()')  # author
    # NOTE(review): identical XPath to ``author`` above -- the account name
    # probably lives under a different node (e.g. a#js_name); verify against
    # a live article page before relying on this field.
    js_name = get_try(data, '//div[@id="meta_content"]//span[@class="rich_media_meta rich_media_meta_text"]//text()')  # account name
    publish_time = re.compile(r'var publish_time.*?\"(.*?)\"').findall(re_data)[0]  # publish time (may be relative: yesterday, 1 week ago, ...)

    images_list = []  # image URLs
    vedio_list = []   # audio/video URLs (spelling kept to match the DB column)

    # Paragraph text plus image/iframe data-src attributes and <mpvoice> nodes.
    js_content = data.xpath('//div[@id="js_content"]//p//text()|//div[@id="js_content"]//p//img/@data-src|//div[@id="js_content"]//p//iframe/@data-src|//mpvoice')
    for i in range(len(js_content)):
        if js_content[i] == '':
            # Fix: the original tested '' == x twice (duplicated condition,
            # likely placeholder characters lost in transcription -- TODO
            # confirm what was meant to be filtered). Empty items stay empty.
            js_content[i] = ''
        elif isinstance(js_content[i], etree._Element):  # <mpvoice> audio node
            media_id = js_content[i].xpath('//mpvoice/@voice_encode_fileid')[0]
            js_content[i] = 'https://res.wx.qq.com/voice/getvoice?mediaid={}'.format(media_id)
            vedio_list.append(js_content[i])
        elif 'pic' in js_content[i]:  # image URL
            images_list.append(js_content[i])
        elif 'v.qq' in js_content[i]:  # Tencent video iframe
            vedio_json = 'https://h5vv.video.qq.com/getinfo?callback=txplayerJsonpCallBack_getinfo_24936&otype=json&vid={}' # JSON descriptor holding the play address
            # Fix: use a fresh name instead of clobbering the ``url`` local.
            descriptor_url = vedio_json.format(js_content[i].split('vid=')[-1].split('&')[0])
            js_content[i] = descriptor_url
            vedio_list.append(js_content[i])
        else:  # plain paragraph text
            js_content[i] = '<p>%s</p>' % js_content[i]

    get_video(vedio_list) # resolve download paths from the audio/video JSON descriptors

    print('-' * 30)

    total_data = {
        'title': title,
        'author': author,
        'js_name': js_name,
        'publish_time': publish_time,
        'js_content': js_content,
        'images': images_list,
        'vedios': vedio_list
    }
    # Down(total_data)  # download step (disabled while video auth is unsolved)

def get_try(data, fangfa):  # wraps the try/except every XPath lookup needs
    """Return the first result of XPath ``fangfa`` on ``data``, stripped.

    Falls back to the placeholder '暫無' ("none yet") when the expression
    matches nothing or the lookup fails for any reason.
    """
    try:
        return data.xpath(fangfa)[0].strip()
    except Exception:  # broad on purpose: any lookup problem -> placeholder
        return '暫無'

def get_video(url_list): # audio is downloadable directly; video goes through the JSON descriptor
    """Resolve each media URL: skip audio (handled later), fetch and parse
    each video JSON descriptor, then try to download the video.

    NOTE(review): the download still answers 403/405 -- the Host header
    previously said 'ugcbsy.qq.com' while the URL host is 'ugcsjy.qq.com';
    they agree now, but Tencent may require more (Referer, cookies, Range).
    """
    print('獲取音、視頻路徑列表', url_list)
    for media_url in url_list:
        if 'voice' in media_url:
            pass
            # voice_name = media_url.split('=')[-1][-10:]
            # request.urlretrieve(media_url,'./'+voice_name+'.mp3') # mp3 is unencrypted; move to Down() later
        else:
            print('視頻的json文件地址', media_url)
            req = request.Request(media_url, headers=base_headers)
            response = request.urlopen(req)

            # Strip the JSONP wrapper to get at the JSON payload.
            video_json = re.compile(r'txplayerJsonpCallBack_getinfo_24936\((.*)\)', re.S).search(response.read().decode()).group(1)
            video_data = json.loads(video_json)

            title = jsonpath.jsonpath(video_data, '$..vl.vi..ti')[0]
            vid = jsonpath.jsonpath(video_data, '$..vl.vi..lnk')[0]
            vkey = jsonpath.jsonpath(video_data, '$..vl.vi..fvkey')[0]
            fn = jsonpath.jsonpath(video_data, '$..vl.vi..fn')[0]
            # Fix: this used to overwrite the ``url_list`` parameter mid-loop.
            ui_list = jsonpath.jsonpath(video_data, '$..vl.vi..ul.ui')[0]

            full_url = 'http://ugcsjy.qq.com/'+vid+'.p712.1.mp4?vkey='+vkey
            print('下載路徑', full_url)

            try:
                # Fix: use a per-request header copy instead of mutating the
                # shared base_headers, and make Host match the URL's host.
                headers = dict(base_headers)
                headers['Host'] = 'ugcsjy.qq.com'
                v_response = requests.get(full_url, headers=headers)
                print(headers)
                print(v_response.status_code)
            except Exception as e:
                print('該下載路徑下載失敗', e)

def Down(data):  # download media; extension fixed after download (TODO: folder per date)
    """Download every URL in ``data['images']`` and ``data['vedios']`` into
    ../download/公衆號_圖片音視頻/, skipping files already present and
    appending the sniffed extension to downloaded images.
    """
    # NOTE(review): video naming/paths are still unreliable; images and audio
    # were the priority when this was written.
    for group in [data['images'], data['vedios']]:
        for media_url in group:
            file_name = media_url.split('/')[-2][-10:]  # local file name stem

            down_path = '../download/公衆號_圖片音視頻/{}'.format(file_name)  # download target

            # Join the directory listing into one string so a single regex
            # search covers every file, extension included (local files get
            # an extension appended after download, so exact match fails).
            existing = ','.join(os.listdir('../download/公衆號_圖片音視頻/'))

            # Fix: escape the name -- URL-derived names may contain regex
            # metacharacters that would break or falsely satisfy the match.
            if re.search(re.escape(file_name) + '.*', existing):
                print('文件已存在', '-', file_name)
            else:
                request.urlretrieve(media_url, down_path)  # download
                end_name = imghdr.what(down_path)  # sniff the image type
                if end_name:  # imghdr only recognises images; None for video/audio
                    os.rename(down_path, down_path + '.' + end_name)
                print('已下載成功', '-', down_path)


def Mydb(data): # persist one article record after its media has been downloaded
    """Insert ``data`` (an ordered column->value dict) into ``table``.

    Fixes: commit the transaction (inserts were silently discarded before),
    close the connection, pass a list (PyMySQL cannot escape a
    ``dict_values`` view), and use keyword args for ``connect`` (positional
    args were removed in PyMySQL 1.0).
    NOTE(review): ``key`` lists 6 columns but get_news builds 7 values
    (js_content included) -- the caller must pass matching columns.
    """
    db = pymysql.connect(host='127.0.0.1', user='root', password='123456',
                         database='PaChong', charset='utf8')
    try:
        cursor = db.cursor()
        placeholders = ','.join(['%s'] * len(data))
        sql = 'insert into {}({}) VALUES({})'.format(table, key, placeholders)
        cursor.execute(sql, list(data.values()))
        db.commit()  # without this the INSERT is rolled back on close
    finally:
        db.close()

def main(): # program entry point -- run the whole crawl from here
    """Time the full crawl: pull article links via get_info() and hand each
    link to get_news()."""
    started = datetime.datetime.now()
    for info in get_info(): # yields [title, link] per article (token/fakeid handled inside)
        get_news(info[-1]) # process the article behind the link
    finished = datetime.datetime.now()

    print('-'*30)
    print('總用時', finished - started)
    print('-'*30)

if __name__ == '__main__':
    # main()  # full crawl entry point (disabled while debugging the parser)
    get_news()  # debug run: parse the single hard-coded sample article
相關文章
相關標籤/搜索