Python爬取視頻(實際上是一篇福利)

窗外下着小雨,做爲單身程序員的我逛着逛着發現一篇好東西,來自知乎「你都用 Python 來作什麼?」的第一個高亮答案。

到上面去看了看,地址都是明文的,得,趕忙開始吧。

下載流式文件,requests庫中請求的stream設爲True就能夠啦,文檔在此。

先找一個視頻地址試驗一下:

# -*- coding: utf-8 -*-
import requests

def download_file(url, path):
    """First attempt: stream *url* to *path* in 1 KiB chunks.

    NOTE(review): this version uses the Response object as a context
    manager; as the article demonstrates right below, requests
    versions without ``Response.__exit__`` raise ``AttributeError``
    here.
    """
    with requests.get(url, stream=True) as r:
        chunk_size = 1024
        # Unused in this version; later revisions use it for progress.
        content_size = int(r.headers['content-length'])
        print '下載開始'
        with open(path, "wb") as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)


if __name__ == '__main__':
    url = '就在原帖...'
    path = '想存哪都行'
    download_file(url, path)

遭遇當頭一棒:

AttributeError: __exit__

這文檔也會騙人的麼!

看樣子是沒有實現上下文須要的__exit__方法。既然只是爲了保證要讓r最後close以釋放鏈接池,那就使用contextlib的closing特性好了:

# -*- coding: utf-8 -*-
import requests
from contextlib import closing

def download_file(url, path):
    """Stream *url* to *path* in 1 KiB chunks.

    Wrapping the response in ``contextlib.closing`` guarantees the
    connection is released even on requests versions whose Response
    class does not implement the context-manager protocol.

    Fix: the original also read ``content-length`` into an unused
    local, which would raise ``KeyError`` on servers that omit the
    header for no benefit; the dead assignment is removed.
    """
    with closing(requests.get(url, stream=True)) as r:
        chunk_size = 1024
        print('下載開始')
        with open(path, "wb") as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)

程序正常運行了,不過我盯着這文件,怎麼大小不見變啊,究竟是完成了多少了呢?仍是要讓下好的內容及時存進硬盤,還能省點內存是否是:

# -*- coding: utf-8 -*-
import requests
from contextlib import closing
import os

def download_file(url, path):
    with closing(requests.get(url, stream=True)) as r:
        chunk_size = 1024
        content_size = int(r.headers['content-length'])
        print '下載開始'
        with open(path, "wb") as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                f.flush()
                os.fsync(f.fileno())

文件以肉眼可見的速度在增大,真心疼個人硬盤,仍是最後一次寫入硬盤吧,程序中記個數就行了:

def download_file(url, path):
    """Stream *url* to *path*, printing cumulative progress per chunk.

    Fix: progress is now based on the number of bytes actually
    written.  The original computed ``n * 1024.0 / content_size``
    from the chunk count, which over-reports when iter_content
    yields a short final chunk and could print more than 100%.
    """
    with closing(requests.get(url, stream=True)) as r:
        chunk_size = 1024
        content_size = int(r.headers['content-length'])
        print('下載開始')
        with open(path, "wb") as f:
            loaded = 0
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                loaded += len(chunk)
                # Explicit float() keeps the division exact on Python 2.
                print('已下載{0:%}'.format(float(loaded) / content_size))

結果就很直觀了:

已下載2.579129%
已下載2.581255%
已下載2.583382%
已下載2.585508%

心懷遠大理想的我怎麼會只知足於這一個呢,寫個類一塊兒使用吧:

# -*- coding: utf-8 -*-
import requests
from contextlib import closing
import time

def download_file(url, path):
    with closing(requests.get(url, stream=True)) as r:
        chunk_size = 1024*10
        content_size = int(r.headers['content-length'])
        print '下載開始'
        with open(path, "wb") as f:
            p = ProgressData(size = content_size, unit='Kb', block=chunk_size)
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                p.output()


class ProgressData(object):
    """Prints a textual progress report for a chunked download.

    ``block`` is the size of one chunk and ``size`` the total size,
    both in bytes; they are divided by 1000 for display in ``unit``.
    ``output()`` must be called exactly once per chunk written.
    """

    def __init__(self, block, size, unit, file_name=''):
        self.file_name = file_name
        self.block = block / 1000.0   # one chunk, in display units
        self.size = size / 1000.0     # total size, in display units
        self.unit = unit
        self.count = 0                # chunks reported so far
        self.start = time.time()

    def output(self):
        """Record one finished chunk and print a progress line."""
        self.end = time.time()
        self.count += 1
        elapsed = self.end - self.start
        # Instantaneous speed of the last chunk; guard against a
        # zero-length interval on very fast iterations.
        speed = self.block / elapsed if elapsed > 0 else 0
        self.start = time.time()
        loaded = self.count * self.block
        progress = round(loaded / self.size, 4)
        if loaded >= self.size:
            print(u'%s下載完成\r\n' % self.file_name)
        else:
            # Fix: the speed label used to sit in front of the
            # percentage field; the percentage now comes first so the
            # line reads "...<percent> 下載速度<speed>/s", matching the
            # corrected version of this class later in the article.
            print(u'{0}下載進度{1:.2f}{2}/{3:.2f}{4} {5:.2%} 下載速度{6:.2f}{7}/s'.format(
                self.file_name, loaded, self.unit,
                self.size, self.unit, progress, speed, self.unit))
            print('%50s' % ('/' * int((1 - progress) * 50)))

運行:

下載開始
下載進度10.24Kb/120174.05Kb 0.01% 下載速度4.75Kb/s 
///////////////////////////////////////////////// 
下載進度20.48Kb/120174.05Kb 0.02% 下載速度32.93Kb/s 
/////////////////////////////////////////////////

看上去舒服多了。

下面要作的就是多線程同時下載了,主線程生產url放入隊列,下載線程獲取url:

# -*- coding: utf-8 -*-
import requests
from contextlib import closing
import time
import Queue
import hashlib
import threading
import os


def download_file(url, path):
    with closing(requests.get(url, stream=True)) as r:
        chunk_size = 1024*10
        content_size = int(r.headers['content-length'])
        if os.path.exists(path) and os.path.getsize(path)>=content_size:
            print '已下載'
            return
        print '下載開始'
        with open(path, "wb") as f:
            p = ProgressData(size = content_size, unit='Kb', block=chunk_size, file_name=path)
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                p.output()


class ProgressData(object):
    # Tracks and prints per-chunk progress for a streaming download.
    # block and size are given in bytes and shown divided by 1000.

    def __init__(self, block, size, unit, file_name=''):
        self.file_name = file_name
        self.block = block / 1000.0
        self.size = size / 1000.0
        self.unit = unit
        self.count = 0
        self.start = time.time()

    def output(self):
        # Called once per chunk: update the counters, then print
        # either a completion message or a progress/speed line.
        self.end = time.time()
        self.count += 1
        interval = self.end - self.start
        if interval > 0:
            speed = self.block / interval
        else:
            speed = 0
        self.start = time.time()
        loaded = self.count * self.block
        progress = round(loaded / self.size, 4)
        if loaded >= self.size:
            print(u'%s下載完成\r\n' % self.file_name)
        else:
            print(u'{0}下載進度{1:.2f}{2}/{3:.2f}{4} {5:.2%} 下載速度{6:.2f}{7}/s'.format(
                self.file_name, loaded, self.unit,
                self.size, self.unit, progress, speed, self.unit))
            print('%50s' % ('/' * int((1 - progress) * 50)))


# Shared work queue: the main thread enqueues URLs and the worker
# threads consume them; a None entry is the shutdown sentinel
# (checked in run()).
queue = Queue.Queue()


def run():
    """Worker loop: pull URLs off the shared queue and download them.

    A None entry is the shutdown sentinel.  Fix: the sentinel is put
    back on the queue before exiting, so every worker thread shuts
    down cleanly -- originally only the first worker to dequeue it
    stopped, and the rest eventually died on Queue.Empty.
    """
    while True:
        url = queue.get(timeout=100)
        if url is None:
            queue.put(None)  # re-post the sentinel for the other workers
            print(u'全下完啦')
            break
        # File name is the md5 of the URL, so the same URL always maps
        # to the same path and re-downloads can be detected.
        h = hashlib.md5()
        h.update(url)
        name = h.hexdigest()
        path = 'e:/download/' + name + '.mp4'
        download_file(url, path)


def get_url():
    # Placeholder producer: the article leaves real URL generation to
    # the reader; here it only enqueues the None shutdown sentinel.
    queue.put(None)


if __name__ == '__main__':
    get_url()
    # Fix: the workers are daemon threads, so the main thread must
    # join them -- originally it fell off the end immediately and the
    # interpreter exited, killing every download before it started.
    workers = []
    for i in xrange(4):
        t = threading.Thread(target=run)
        t.daemon = True
        t.start()
        workers.append(t)
    for t in workers:
        t.join()

加了重複下載的判斷,至於怎麼源源不斷的生產url,諸位摸索吧,保重身體!

相關文章
相關標籤/搜索