Fetching all the campus news

1. Pull all the news items off a single news-list page, wrapped up as a function.

2. Get the total number of news articles and compute the total number of list pages (see the arithmetic sketch after this list).

3. Fetch the full details of every news item on every list page.
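Step 2 above is just arithmetic: the list footer reports the total article count, and each list page holds ten items. A minimal sketch, assuming the footer element reads something like '561条' ("561 articles"; the number is made up):

total = int('561条'.rstrip('条'))  # strip the trailing 条 ("articles") -> 561
pages = total // 10 + 1           # -> 57 list pages (overshoots by one when total is an exact multiple of 10)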

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re


# Get the click count for one news article
def getNewsId(url):
    newsId = re.findall(r'_(.*).html', url)[0][-4:]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickRes = requests.get(clickUrl)
    # Pull the click count out of the returned JS snippet with a regex
    clickCount = int(re.search(r"hits'\).html\('(.*)'\);", clickRes.text).group(1))
    return clickCount
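As a quick sanity check, this is how the id falls out of a detail URL (the URL below is made up to match the real pattern; only the last four digits are used):

url = 'http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'  # hypothetical
print(re.findall(r'_(.*).html', url)[0][-4:])  # -> '9183'
# getNewsId(url) would then ask the oa.gzcc.cn counter API for that id's hits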


# Get the details of one news article
def getNewsDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')

    content = soupd.select('#content')[0].text
    info = soupd.select('.show-info')[0].text
    # Call getNewsId() to get the click count
    count = getNewsId(newsUrl)
    # Pick the timestamp out of the info line
    date = re.search(r'(\d{4}.\d{2}.\d{2}\s\d{2}.\d{2}.\d{2})', info).group(1)
    # Author / reviewer / source may each be absent, so default them first
    # (the page text is simplified Chinese: 作者 author, 审核 reviewer, 来源 source)
    author = check = sources = 'none'
    if '作者:' in info:
        author = re.search(r'作者:((.{2,4}\s|.{2,4}、){1,3})', info).group(1)
    if '审核:' in info:
        check = re.search(r'审核:((.{2,4}\s){1,3})', info).group(1)
    if '来源:' in info:
        sources = re.search(r'来源:(.*?)\s*(?:摄|点)', info).group(1)
    # Convert the timestamp string into a datetime object
    dateTime = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    # Assemble the report with str.format()
    print('Published: {0}\nAuthor: {1}\nReviewed by: {2}\nSource: {3}\nClicks: {4}'.format(dateTime, author, check, sources, count))
    print(content)
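To see what those patterns extract, here is a minimal check against a made-up show-info line (names and numbers are illustrative only):

info = '2018-04-04 10:05:02 作者:张三 审核:李四 来源:新闻中心 点击: 88'  # hypothetical
print(re.search(r'(\d{4}.\d{2}.\d{2}\s\d{2}.\d{2}.\d{2})', info).group(1))  # 2018-04-04 10:05:02
print(re.search(r'作者:((.{2,4}\s|.{2,4}、){1,3})', info).group(1))  # 张三 (plus the matched space)
print(re.search(r'来源:(.*?)\s*(?:摄|点)', info).group(1))  # 新闻中心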


# Process one news-list page: print each item's title, summary, and link
def getListPage(listUrl):
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    for new in soup.select('li'):
        if len(new.select('.news-list-title')) > 0:
            title = new.select('.news-list-title')[0].text
            description = new.select('.news-list-description')[0].text
            newsUrl = new.select('a')[0]['href']

            print('Title: {0}\nSummary: {1}\nLink: {2}'.format(title, description, newsUrl))
            # Call getNewsDetail() to print the article's details
            getNewsDetail(newsUrl)
            # Stop after the first article per page to keep the output short
            break


listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
# First list page
getListPage(listUrl)
res = requests.get(listUrl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
# The .a1 element holds the total article count, e.g. '561条'; ten articles per page
listCount = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1

# The remaining list pages are named 2.html, 3.html, ...
for i in range(2, listCount + 1):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(listUrl)

4. Pick a topic you are interested in, scrape it, and run a word-segmentation analysis on the text. The topic must not duplicate another student's.

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import jieba
 
newsurl = 'http://tv.cctv.com/cctv5/'
 
 
# Count word frequencies in a piece of text and print the top five
def sort(text):
    # Replace punctuation with spaces so it cannot glue words together
    punctuation = '''一!「」,。?;’"',.、:\n'''
    for s in punctuation:
        text = text.replace(s, ' ')
    # Segment the text into words with jieba
    wordlist = list(jieba.cut(text))
    # Whitespace-like characters and leftovers to ignore in the counts
    exclude = {'', '\u3000', '\r', '\xa0', '_', ' ', '(', ')'}
    words = set(wordlist) - exclude
    counts = {}
    for key in words:
        counts[key] = wordlist.count(key)
    countlist = list(counts.items())
    countlist.sort(key=lambda x: x[1], reverse=True)
    print('Top 5 keywords:')
    for i in range(5):
        print(countlist[i])
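A quick, self-contained way to try the routine (jieba must be installed; the sentence is made up for the demo):

sort('今晚的比赛非常精彩,比赛的转播由体育频道负责,比赛结束后还有比赛集锦。')
# 比赛 appears four times, so it should top the printed list, e.g. ('比赛', 4)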
 
 
# Fetch one article page: print its author and body, then run the keyword count
def getContent(url):
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup2 = BeautifulSoup(res.text, 'html.parser')
    for news in soup2.select('.l_a'):
        if len(news.select('.author')) > 0:
            author = news.select('.author')[0].text
            print('Author:', author)
    # Drop the injected ad JavaScript from the body text
    # (str.replace, not rstrip: rstrip strips characters, not a suffix string)
    content = soup2.select('.la_con')[0].text.replace('AD_SURVEY_Add_AdPos("7000531");', '')
    print('Body:', content)
    sort(content)
 
 
# Walk the news items on the channel page and print each one's details
def getNewDetails(newsurl):
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('.item'):
        title = news.select('a')[0].attrs['title']
        a = news.select('a')[0].attrs['href']
        # Trim the trailing '[详细]' ("details") link text from the summary
        brief = news.select('h5')[0].text.replace('[详细]', '')
        time = news.select('h6')[0].text
        dt = datetime.strptime(time, '%Y-%m-%d %H:%M')
        print('Title:', title)
        print('Link:', a)
        print('Summary:', brief)
        print('Time:', dt)
        getContent(a)
        print('\n')
 
 
# The function fetches the page itself, so just call it on the channel URL
getNewDetails(newsurl)