[Big Data] Crawling All the Campus News

This assignment comes from: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE2/homework/2941

1. Get the details of one news article from its url: a dictionary, anews

2. Get the news urls from a list page's url: list append(dict), alist

3. Generate the urls of all list pages and fetch all the news: list extend(list), allnews

*Each student crawls the 10 list pages starting from the tail digits of their student number

4. Set a reasonable crawl interval

import time
import random
time.sleep(random.random() * 3)

5. Use pandas for simple data processing and save the results

Save to a csv or Excel file:

newsdf.to_csv(r'F:\duym\爬蟲\gzccnews.csv')

Save to the database:

import sqlite3
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    newsdf.to_sql('gzccnewsdb',db)
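To check that the save worked, the table can be read straight back with pandas (a minimal sketch; the file and table names match the snippet above):

import sqlite3
import pandas as pd
# Read the saved table back and show the first rows
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df = pd.read_sql_query('SELECT * FROM gzccnewsdb', db)
print(df.head())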

Code implementation:

# Get all the information of one news article
import re
import requests
import time
import random
import pandas as pd
from bs4 import BeautifulSoup
from datetime import datetime

# Extract the news id from the article url
def newsnum(url):
    # the year in the path varies by article, so match any four digits instead of a hardcoded year
    newsid = re.match(r'http://news.gzcc.cn/html/\d{4}/xiaoyuanxinwen_(.*)/(.*)\.html', url).group(2)
    return newsid
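As a quick check, the regex pulls the trailing id out of a detail-page url (the url here is illustrative, not a real article):

# Illustrative: newsnum('http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html') -> '11155'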

# Publication time, parsed into a datetime
def newstime(soup):
    info = soup.select('.show-info')[0].text.split()
    newsdate = info[0].split(':')[1]  # the date, after its label
    newsclock = info[1]               # the time of day
    dt = datetime.strptime(newsdate + ' ' + newsclock, '%Y-%m-%d %H:%M:%S')
    return dt
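To see what the two splits pick out, here is the same parsing on a .show-info string of the assumed format (label and values are made up for illustration):

# Hypothetical .show-info text of the assumed format
info = '發佈時間:2019-04-01 11:13:06 作者:張三'.split()
info[0].split(':')[1]   # '2019-04-01'
info[1]                 # '11:13:06'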

# Get the click count from the count API
def getClick(newsid):
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    res = requests.get(clickUrl)
    # the response is a JS snippet; keep only the number inside the last .html('...') call
    click = res.text.split('.html')[-1].lstrip("('").rstrip("');")
    return click
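The count endpoint replies with a small piece of JavaScript rather than JSON, which is why the string surgery above is needed. On a response of the assumed shape (illustrative; the real payload may differ):

# Assumed response shape (illustrative)
text = "$('#hits').html('2423');"
text.split('.html')[-1]                             # "('2423');"
text.split('.html')[-1].lstrip("('").rstrip("');")  # '2423'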

# Get the details of one news article as a dictionary
def getDetails(url):
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    anews = {}
    newsid = newsnum(url)  # news id
    anews['新聞編號'] = newsid
    anews['標題'] = soup.select('.show-title')[0].text
    anews['發佈時間'] = str(newstime(soup))
    anews['作者'] = re.match('作者:(.*)', soup.select('.show-info')[0].text.split()[2]).group(1)
    # anews['來源'] = re.match('來源:(.*)', soup.select('.show-info')[0].text.split()[3]).group(1)
    anews['點擊次數'] = getClick(newsid)
    anews['url'] = url
    # anews['內容'] = soup.select('.show-content p')[0].text
    # print(anews)
    return anews

# Get the news urls from a list page's url
def getUrl(url):
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    alist = []
    for a in soup.select('.news-list')[0].select('a'):
        alist.append(a['href'])
    return alist
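Note that every anchor under .news-list is collected, so if a list page ever contains a non-article link, newsnum's re.match would return None and raise AttributeError. A defensive variant (a sketch; getUrlSafe is a name introduced here, not part of the original code):

# Keep only hrefs that look like article detail pages (sketch)
def getUrlSafe(url):
    pattern = r'http://news.gzcc.cn/html/\d{4}/xiaoyuanxinwen_.*\.html'
    return [u for u in getUrl(url) if re.match(pattern, u)]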

# Crawl one list page (a ten-page version for step 3 is sketched below)
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/74.html'
allnews = []
for i in getUrl(url):
    allnews.append(getDetails(i))
    time.sleep(random.random() * 3)  # step 4: polite crawl interval
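The loop above covers only the single list page 74.html. Step 3 and the per-student note ask for ten list pages starting from the tail of the student number; a minimal sketch that would replace the single-page loop, assuming list pages follow the pattern http://news.gzcc.cn/html/xiaoyuanxinwen/{n}.html and a student number ending in 74:

# Sketch of step 3: ten list pages starting at the student-number tail (assumed to be 74)
allnews = []
for n in range(74, 84):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(n)
    allnews.extend([getDetails(u) for u in getUrl(listurl)])  # list extend(list)
    time.sleep(random.random() * 3)  # step 4: polite interval between pages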

# Generate the csv file
newsdf = pd.DataFrame(allnews)
newsdf.to_csv(r'gzccnews.csv', encoding='utf-8')
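If the csv will be opened directly in Excel, writing a UTF-8 BOM helps Excel detect the encoding; the same call with a different codec:

# newsdf.to_csv(r'gzccnews.csv', encoding='utf_8_sig')  # BOM so Excel detects UTF-8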

# Generate the database file (if_exists='replace' lets the script be re-run)
import sqlite3
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    newsdf.to_sql('gzccnewsdb', db, if_exists='replace')

print(allnews)

The generated csv file:

The generated sqlite file:
