python+BeautifulSoup+多進程爬取糗事百科圖片

用到的庫:

import requests
import os
from bs4 import BeautifulSoup
import time
from multiprocessing import Pool

定義圖片存儲路徑:

# Destination directory for downloaded images (Windows path).
# NOTE(review): the raw string ends in TWO literal backslashes (a raw string
# cannot end with a single one); harmless on Windows, but confirm intended.
path = r'E:\爬蟲\0805\\'

請求頭,模擬瀏覽器請求:

在瀏覽器中的位置:按 F12 打開開發者模式即可查看。

 

# Request headers: a desktop-Chrome User-Agent so the site serves the normal
# page instead of blocking the default python-requests agent string.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

主函數:

def get_images(url):
    """Download every thumbnail image found on one qiushibaike list page.

    Parameters
    ----------
    url : str
        Full URL of a list page, e.g.
        ``https://www.qiushibaike.com/imgrank/page/1/``.

    Side effects: writes image files into the module-level ``path`` directory
    and prints progress messages. A failure on one image is reported and the
    loop continues, so a single bad URL does not abort the whole page.
    """
    scheme = 'https:'  # the page uses protocol-relative src values ("//host/...")
    res = requests.get(url, headers=headers, timeout=10)
    soup = BeautifulSoup(res.text, 'lxml')
    url_infos = soup.select('div.thumb > a > img')
    for url_info in url_infos:
        src = url_info.get('src')
        if not src:
            # Guard: an <img> without src would raise TypeError on concat,
            # which the original silently swallowed in the broad except.
            continue
        img_url = scheme + src
        # Compute the target filename once instead of re-splitting the URL.
        filename = path + img_url.split('/')[-1]
        try:
            if os.path.exists(filename):
                print('圖片已下載')
            else:
                image = requests.get(img_url, headers=headers, timeout=10)
                with open(filename, 'wb') as fp:
                    fp.write(image.content)
                print('正在下載:' + img_url)
                time.sleep(0.5)  # be polite: throttle successive downloads
        except Exception as e:
            # Best-effort: report the error and move on to the next image.
            print(e)

開始爬蟲程序:

if __name__ == '__main__':
    # List pages 1-13 of the /imgrank/ section.
    urls = ['https://www.qiushibaike.com/imgrank/page/{}/'.format(i) for i in range(1, 14)]
    # Create the output directory up front so worker processes can write to it.
    os.makedirs(path, exist_ok=True)
    # Use the Pool as a context manager so the worker processes are cleaned
    # up; the original never called pool.close()/pool.join().
    with Pool() as pool:
        pool.map(get_images, urls)
    print('抓取完畢')

爬取中:

打開文件夾查看爬取結果:

 

done

完整代碼:

import requests
import os
from bs4 import BeautifulSoup
import time
from multiprocessing import Pool
"""
************經常使用爬蟲庫***********
       requests
       BeautifulSoup
       pyquery 
       lxml
************爬蟲框架***********
       scrapy
       三大解析方式:re,css,xpath
"""
# Desktop-Chrome User-Agent so the site serves the normal page rather than
# rejecting the default python-requests agent string.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}
# Destination directory for downloaded images.
# NOTE(review): the raw string ends in TWO literal backslashes (a raw string
# cannot end with a single one); harmless on Windows, but confirm intended.
path = r'E:\爬蟲\0805\\'
def get_images(url):
    """Fetch one list page and save every thumbnail image it links to.

    Writes files into the module-level ``path`` directory and prints a
    progress line per image; errors on individual images are printed and
    skipped so the rest of the page is still processed.
    """
    proto = 'https:'
    page = requests.get(url, headers=headers)
    dom = BeautifulSoup(page.text, 'lxml')
    for img_tag in dom.select('div.thumb > a > img'):
        try:
            # src attributes are protocol-relative ("//host/..."): prepend scheme.
            img_url = proto + img_tag.get('src')
            target = path + img_url.split('/')[-1]
            if os.path.exists(target):
                print('圖片已下載')
                continue
            reply = requests.get(img_url, headers=headers)
            with open(target, 'wb') as fp:
                fp.write(reply.content)
            print('正在下載:' + img_url)
            time.sleep(0.5)
        except Exception as e:
            print(e)

if __name__ == '__main__':
    # One URL per list page (pages 1-13 of /imgrank/).
    page_urls = ['https://www.qiushibaike.com/imgrank/page/{}/'.format(i) for i in range(1, 14)]
    # Make sure the target directory exists before workers start writing.
    os.makedirs(path, exist_ok=True)
    # Context manager guarantees the worker processes are cleaned up; the
    # original code never called pool.close()/pool.join().
    with Pool() as pool:
        pool.map(get_images, page_urls)
    print('抓取完畢')
相關文章
相關標籤/搜索