Python Training Day 8

Today is the eighth day of training, and it again centered on web scraping.

Today's notes:

1. Parsing library: bs4


'''
pip3 install beautifulsoup4  # install bs4
pip3 install lxml  # install the lxml parser
'''

html_doc = """

<html><head><title>The Dormouse's story</title></head>

<body>

<p class="sister"><b>$37</b></p>

<p class="story" id="p">Once upon a time there were three little sisters; and their names were

<a href="http://example.com/elsie" class="sister" >Elsie</a>,

<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and

<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;

and they lived at the bottom of a well.</p>

 

<p class="story">...</p>

"""

 

# Import BeautifulSoup from bs4
from bs4 import BeautifulSoup

# Instantiate BeautifulSoup to get a soup object
# Argument 1: the text to parse
# Argument 2: the parser (html.parser, lxml, ...)
soup = BeautifulSoup(html_doc, 'lxml')

print(soup)
print('*' * 100)
print(type(soup))
print('*' * 100)
# Pretty-print the document
html = soup.prettify()
print(html)
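
If lxml is not installed, BeautifulSoup raises an error when asked for it. A minimal fallback sketch (both parser names are standard bs4 choices; make_soup is a made-up helper name):

# Prefer the fast lxml parser, fall back to the stdlib html.parser
from bs4 import BeautifulSoup

def make_soup(markup):
    try:
        return BeautifulSoup(markup, 'lxml')
    except Exception:  # bs4 raises FeatureNotFound if lxml is absent
        # pure-Python fallback, no extra install required
        return BeautifulSoup(markup, 'html.parser')

soup = make_soup(html_doc)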

2. bs4: traversing the document tree

html_doc = """<html><head><title>The Dormouse's story</title></head><body><p class="sister"><b>$37</b></p><p class="story" id="p">Once upon a time there were three little sisters; and their names were<b>tank</b><a href="http://example.com/elsie" class="sister" >Elsie</a>,<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;and they lived at the bottom of a well.<hr></hr></p><p class="story">...</p>"""

 

from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'lxml')

'''
Traversing the document tree:
    1. Direct attribute access
    2. Get a tag's name
    3. Get a tag's attributes
    4. Get a tag's text content
    5. Nested selection
    6. Children and descendants
    7. Parent and ancestors
    8. Siblings
'''

# 1. Direct attribute access
print(soup.p)  # first p tag
print(soup.a)  # first a tag

# 2. Get a tag's name
print(soup.head.name)  # name of the head tag

# 3. Get a tag's attributes
print(soup.a.attrs)  # all attributes of the first a tag
print(soup.a.attrs['href'])  # the href attribute of the first a tag

# 4. Get a tag's text content
print(soup.p.text)  # $37

# 5. Nested selection
print(soup.html.head)

# 6. Children and descendants
print(soup.body.children)  # all direct children of body, returned as an iterator
print(list(soup.body.children))  # cast to a list

print(soup.body.descendants)  # all descendants
print(list(soup.body.descendants))

# 7. Parent and ancestors
print(soup.p.parent)  # the parent of the first p tag
# Returns a generator
print(soup.p.parents)  # all ancestors of the first p tag
print(list(soup.p.parents))

# 8. Siblings
# Next sibling
print(soup.p.next_sibling)
# All following siblings, returned as a generator
print(soup.p.next_siblings)
print(list(soup.p.next_siblings))

# Previous sibling
print(soup.a.previous_sibling)  # the sibling right before the first a tag
# All siblings before the first a tag
print(soup.a.previous_siblings)  # returns a generator
print(list(soup.a.previous_siblings))
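
One caveat: when the markup contains whitespace or text between tags, next_sibling and previous_sibling return NavigableString text nodes rather than tags. A quick sketch (the two-paragraph markup is made up for illustration):

# With a newline between tags, the first sibling is a text node
from bs4 import BeautifulSoup
s = BeautifulSoup("<p>one</p>\n<p>two</p>", 'lxml')
print(repr(s.p.next_sibling))   # '\n' (a NavigableString)
print(s.p.find_next_sibling())  # <p>two</p> (skips text nodes)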

3. bs4: searching the document tree


html_doc = """<html><head><title>The Dormouse's story</title></head><body><p class="sister"><b>$37</b></p><p class="story" id="p">Once upon a time there were three little sisters; and their names were<b>tank</b><a href="http://example.com/elsie" class="sister" >Elsie</a>,<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;and they lived at the bottom of a well.<hr></hr></p><p class="story">...</p>"""

'''
Searching the document tree:
    find()      find one match
    find_all()  find all matches

Tag search vs. attribute search:
    Tag:
        name   match by tag name
        attrs  match by attributes
        text   match by text

        - String filter
            exact string match
        - Regex filter
            match via the re module
        - List filter
            match anything in the list
        - Bool filter
            True matches tags that have the attribute at all
        - Method filter
            a function specifying which attributes a tag must (or must not) have

    Attribute shortcuts:
        - class_
        - id
'''

 

from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'lxml')

# String filter
# name
p_tag = soup.find(name='p')
print(p_tag)  # find the first tag named p
# Find all tags named p
tag_s1 = soup.find_all(name='p')
print(tag_s1)

# attrs
# Find the first node with class "sister"
p = soup.find(attrs={"class": "sister"})
print(p)
# Find all nodes with class "sister"
tag_s2 = soup.find_all(attrs={"class": "sister"})
print(tag_s2)

# text
text = soup.find(text="$37")
print(text)

# Combined:
# Find the a tag whose id is "link2" and whose text is "Lacie"
a_tag = soup.find(name="a", attrs={"id": "link2"}, text="Lacie")
print(a_tag)

# Regex filter
import re
# name
p_tag = soup.find(name=re.compile('p'))
print(p_tag)

# List filter
# name
tags = soup.find_all(name=['p', 'a', re.compile('html')])
print(tags)

# Bool filter
# True matches tags that have the given attribute
# Find a p tag that has an id
p = soup.find(name='p', attrs={"id": True})
print(p)

# Method filter
# Match a tags that have both an id and a class attribute
def have_id_class(tag):
    if tag.name == 'a' and tag.has_attr('id') and tag.has_attr('class'):
        return tag

tag = soup.find(name=have_id_class)
print(tag)

4. Scraping Wandoujia

'''
Home page:
    icon URL, download count, size, detail page URL

Detail page:
    game name, icon name, rating, comment count, editor's review, description, user comments, URLs of 1-5 screenshots, download URL

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=1&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=2&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=3&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

32 pages in total
'''

import requests
import re
from bs4 import BeautifulSoup

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Parse
# Parse the home page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # Get the li tag of every app
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # Icon URL
        img = app.find(name='img').attrs['data-original']
        print(img)

        # Download count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        print(down_num)

        # Size: the span whose text matches digits + "MB" (\d+ means digits);
        # search within the current app card rather than the whole page
        size = app.find(name='span', text=re.compile(r"\d+MB")).text
        print(size)

        # Detail page URL: href of the first a tag in the card
        detail_url = app.find(name='a').attrs['href']
        print(detail_url)

def main():
    for page in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={page}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"

        # 1. Request the app list API
        response = get_page(url)
        print('*' * 1000)
        # Deserialize the JSON response into a dict
        data = response.json()
        # Get the app markup from the API payload
        app_li = data['data']['content']
        # 2. Parse the app markup
        parse_index(app_li)


if __name__ == '__main__':
    main()
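
In practice, Wandoujia (like most sites) may reject requests that lack a browser-like User-Agent, so get_page can pass one explicitly. A minimal sketch (the header string is an arbitrary example, not something the site specifies):

import requests

# Same get_page, but sending a browser-like User-Agent header
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}

def get_page(url):
    response = requests.get(url, headers=HEADERS)
    response.raise_for_status()  # fail fast on HTTP errors
    return response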
The same crawler, extended to follow each app's detail page:

'''
Home page:
    icon URL, download count, size, detail page URL

Detail page:
    game name, rating, comment count, editor's review, download URL, description, user comments, URLs of 1-5 screenshots

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=1&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=2&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=3&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

32 pages in total
'''

import requests
import re
from bs4 import BeautifulSoup

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Parse
# Parse the detail page
def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')

    # App name
    name = soup.find(name="span", attrs={"class": "title"}).text

    # Rating
    love = soup.find(name='span', attrs={"class": "love"}).text

    # Comment count
    commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text

    # Editor's review
    commit_content = soup.find(name='div', attrs={"class": "con"}).text

    # App download URL
    download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']

    print(
        f'''
        ============= tank ==============
        App name: {name}
        Rating: {love}
        Comment count: {commit_num}
        Editor's review: {commit_content}
        Download URL: {download_url}
        ============= end ==============
        '''
    )

# Parse the home page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # Get the li tag of every app
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # Icon URL
        # data-original attribute of the first img tag
        img = app.find(name='img').attrs['data-original']
        print(img)

        # Download count
        # text of the span tag with class "install-count"
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        print(down_num)

        # Size
        # the span whose text matches digits + "MB" (\d+ means digits);
        # search within the current app card rather than the whole page
        size = app.find(name='span', text=re.compile(r"\d+MB")).text
        print(size)

        # Detail page URL
        # href attribute of the first a tag in the card
        detail_url = app.find(name='a').attrs['href']
        print(detail_url)

        # 3. Request the app's detail page
        response = get_page(detail_url)

        # 4. Parse the detail page
        parse_detail(response.text)

def main():
    for page in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={page}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"

        # 1. Request the app list API
        response = get_page(url)
        print('*' * 1000)
        # Deserialize the JSON response into a dict
        data = response.json()

        # Get the app markup from the API payload
        app_li = data['data']['content']
        # 2. Parse the app markup
        parse_index(app_li)


if __name__ == '__main__':
    main()

5. MongoDB basics

MongoDB: a non-relational (NoSQL) database

Part 1: Installation and startup

1. Download and install
https://www.mongodb.com/download-center/community

2. Create a data/db folder on the C drive
- this is where the data is stored

3. Start the server with mongod
Open a terminal and run mongod to start the MongoDB service.

4. Enter the MongoDB client with mongo
Open a new terminal and run mongo to enter the client.

Part 2: Database operations

Switch database:
    SQL:
        use admin;  -- switches if the database exists, errors otherwise
    MongoDB:
        use tank  // switches if the database exists, otherwise creates it and switches into it

List databases:
    SQL:
        show databases;
    MongoDB:
        show dbs
    Databases that contain no data are not listed.

Drop a database:
    SQL:
        drop database
    MongoDB:
        db.dropDatabase()

Collection operations (a collection is MongoDB's equivalent of a MySQL table):

Create a collection:
    SQL:
        create table t (f1, f2, ...)
    MongoDB:
        // collections are created implicitly by referencing them on the current database
        db.student

Insert data:
    // insert multiple documents
    db.student.insert([{"name1": "tank1"}, {"name2": "tank2"}])
    // insert one document
    db.student.insert({"name": "tank"})

Query data:
    // find all documents in the student collection
    db.student.find({})
    // find matching documents: name equals "tank"
    db.student.find({"name": "tank"})

Part 3: Connecting to MongoDB from Python

1. Install the third-party module pymongo
    pip3 install pymongo

2. Connect to the MongoDB server
    client = MongoClient('localhost', 27017)
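
A quick way to verify the connection from Python (a minimal sketch; it assumes a local server started with mongod as above):

from pymongo import MongoClient

# Connect and verify by listing databases; empty databases are not listed,
# matching the `show dbs` behavior described above.
client = MongoClient('localhost', 27017)
print(client.list_database_names())
client.close()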

6. pymongo basics

from pymongo import MongoClient

# 1. Connect to the MongoDB server
# Argument 1: MongoDB host/IP address
# Argument 2: MongoDB port, default 27017
client = MongoClient('localhost', 27017)
# print(client)

# 2. Access the tank_db database (created implicitly on first write)
# print(client['tank_db'])

# 3. Access a collection (also created implicitly on first write)
# print(client['tank_db']['people'])

# 4. Insert data into tank_db

# 1) Insert one document
data1 = {
    'name': 'tank',
    'age': 18,
    'sex': 'male'
}
client['tank_db']['people'].insert(data1)

# 2) Insert multiple documents
data1 = {
    'name': 'tank',
    'age': 18,
    'sex': 'male'
}
data2 = {
    'name': '李子恆',
    'age': 84,
    'sex': 'female'
}
data3 = {
    'name': '張庭宇',
    'age': 73,
    'sex': 'male'
}
client['tank_db']['people'].insert([data1, data2, data3])

# 5. Query data
# Find all documents
data_s = client['tank_db']['people'].find()
print(data_s)  # <pymongo.cursor.Cursor object at 0x000002EEA6720128>
# The cursor must be iterated to print the documents
for data in data_s:
    print(data)

# Find a single document
data = client['tank_db']['people'].find_one()
print(data)

# Officially recommended methods (insert is deprecated):
# insert one document with insert_one
client['tank_db']['people'].insert_one(data1)
# insert multiple documents with insert_many
client['tank_db']['people'].insert_many([data2, data3])
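
For reference, insert_one returns a result object carrying the new document's _id, and find accepts a filter document; a minimal sketch ($gt is MongoDB's greater-than operator):

# insert_one returns an InsertOneResult; inserted_id is the generated _id
result = client['tank_db']['people'].insert_one({'name': 'tank', 'age': 18, 'sex': 'male'})
print(result.inserted_id)

# Filtered query: all males older than 20
for doc in client['tank_db']['people'].find({'sex': 'male', 'age': {'$gt': 20}}):
    print(doc)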

Homework

Insert the scraped Wandoujia data into MongoDB:
- create a wandoujia database
- store the home-page data in a collection named index
- store the detail-page data in a collection named detail

'''
Home page:
    icon URL, download count, size, detail page URL

Detail page:
    game name, rating, comment count, editor's review, download URL, description, user comments, URLs of 1-5 screenshots

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=1&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=2&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=3&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

32 pages in total
'''
import requests
import re
from bs4 import BeautifulSoup
from pymongo import MongoClient
'''
3. Insert the scraped Wandoujia data into MongoDB:
    - create a wandoujia database
        - store the home-page data in a collection named index
        - store the detail-page data in a collection named detail
'''
# Connect to the MongoDB server
client = MongoClient('localhost', 27017)
# Create/select the index collection in the wandoujia database
index_col = client['wandoujia']['index']
# Create/select the detail collection in the wandoujia database
detail_col = client['wandoujia']['detail']

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response
 
 
# 2. Parse
# Parse the detail page
def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')

    # App name
    try:
        name = soup.find(name="span", attrs={"class": "title"}).text
    except Exception:
        # fall back to None if the tag is missing
        name = None

    # Rating
    try:
        love = soup.find(name='span', attrs={"class": "love"}).text
    except Exception:
        love = None

    # Comment count
    try:
        commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text
    except Exception:
        commit_num = None

    # Editor's review
    try:
        commit_content = soup.find(name='div', attrs={"class": "con"}).text
    except Exception:
        commit_content = None

    # App download URL
    try:
        download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']
    except Exception:
        download_url = None

    # Build the document once, substituting placeholders for missing fields,
    # so detail_data is always defined no matter which lookups failed
    detail_data = {
        'name': name,
        # if love has no value, record a placeholder
        'love': love if love else 'no likes yet, so sad',
        'commit_num': commit_num,
        'commit_content': commit_content,
        # if download_url has no value, record a placeholder
        'download_url': download_url if download_url else 'no installer available'
    }

    # Insert the detail-page document
    detail_col.insert_one(detail_data)
    print(f'Detail data for app {name} inserted!')
 
# Parse the home page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # Get the li tag of every app
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # Icon URL
        # data-original attribute of the first img tag
        img = app.find(name='img').attrs['data-original']

        # Download count
        # text of the span tag with class "install-count"
        down_num = app.find(name='span', attrs={"class": "install-count"}).text

        # Size
        # the span whose text matches digits + "MB" (\d+ means digits);
        # search within the current app card rather than the whole page
        size = app.find(name='span', text=re.compile(r"\d+MB")).text

        # Detail page URL
        # href attribute of the first a tag in the card
        detail_url = app.find(name='a').attrs['href']

        # Assemble the home-page document
        index_data = {
            'img': img,
            'down_num': down_num,
            'size': size,
            'detail_url': detail_url
        }

        # Insert the home-page document
        index_col.insert_one(index_data)
        print('Home-page data inserted!')

        # 3. Request the app's detail page
        response = get_page(detail_url)

        # 4. Parse the detail page
        parse_detail(response.text)
 
 
def main():
    for page in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={page}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"

        # 1. Request the app list API
        response = get_page(url)
        print('*' * 1000)
        # Deserialize the JSON response into a dict
        data = response.json()

        # Get the app markup from the API payload
        app_li = data['data']['content']

        # 2. Parse the app markup
        parse_index(app_li)

    # Close the MongoDB client only after every page has been processed;
    # closing it inside the loop would kill the connection after the first page
    client.close()

if __name__ == '__main__':
    main()
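
The five try/except blocks in parse_detail all repeat the same pattern; they could be factored into a small helper. A minimal sketch (safe_find is a hypothetical name, not part of bs4):

# Hypothetical helper: wraps soup.find and returns a default instead of raising
# when the tag (or attribute) is missing.
def safe_find(soup, name, attr=None, default=None, **kwargs):
    tag = soup.find(name=name, **kwargs)
    if tag is None:
        return default
    return tag.attrs.get(attr, default) if attr else tag.text

# Usage inside parse_detail:
# name = safe_find(soup, 'span', attrs={"class": "title"})
# download_url = safe_find(soup, 'a', attr='href', attrs={"class": "normal-dl-btn"})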