1. 將新聞的正文內容保存到文本文件。
# Fetch the campus-news index page and append its text to a local file.
# BUG FIX: the original wrote an undefined name `contents` (NameError);
# derive the page text from the parsed document instead, and use a
# context manager so the file handle is always closed.
import requests
from bs4 import BeautifulSoup

url = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
res = requests.get(url)
res.encoding = "utf-8"
soup = BeautifulSoup(res.text, "html.parser")
contents = soup.get_text()  # plain text of the whole page
with open('gzccnews.txt', "a", encoding="utf-8") as f:
    f.write(contents)
2. 將新聞數據結構化爲字典的列表:
def getNewsDetail(url):
    """Fetch one news article page and return its fields as a dict.

    Returned keys: title, time (datetime), source, clickCount, newsUrl,
    content.  Relies on module-level helpers getClickCount() and
    writeNewsDetail(), and on requests / BeautifulSoup / datetime
    being imported at module level.
    """
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    # BUG FIX: str.lstrip() strips a *character set*, not a prefix, so
    # lstrip('發佈時間:') is the wrong idiom.  Slice past the label instead.
    label = '發佈時間:'
    time_str = info[info.find(label) + len(label):][0:19]
    news['time'] = datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')
    # BUG FIX: find() returns -1 when absent; `> 0` would also reject a
    # (theoretical) match at position 0.
    pos = info.find('來源:')
    if pos != -1:
        # Take the first whitespace-delimited token after the label.
        news['source'] = info[pos + len('來源:'):].split()[0]
    else:
        news['source'] = 'none'
    news['clickCount'] = int(getClickCount(url))
    news['newsUrl'] = url
    news['content'] = soupd.select('.show-content')[0].text.strip()
    writeNewsDetail(news['content'])
    return news
3. 安裝pandas,用pandas.DataFrame(newstotal),建立一個DataFrame對象df。
# Wrap the collected news records (a list of dicts) in a pandas
# DataFrame so the rows can be filtered and analysed.
import pandas

newstotal = [{}]  # placeholder; in the full script this holds news dicts
df = pandas.DataFrame(newstotal)
4. 經過df將提取的數據保存到csv或excel文件。
# Export the DataFrame to an Excel workbook; openpyxl is the engine
# pandas uses to write .xlsx files.
import openpyxl

df.to_excel('gzccnews.xlsx')
5. 用pandas提供的函數和方法進行數據分析:
# Preview three columns of the first six rows.
print(df[['title', 'clickCount', 'source']][:6])
# Rows with more than 3000 clicks whose source is the general office.
print(df[(df['clickCount'] > 3000) & (df['source'] == '學校綜合辦')])
# Rows whose source is in a whitelist of departments.
sou = ['國際學院', '學生工做處']
print(df[df['source'].isin(sou)])
# Re-index by publish time, then slice out March 2018.
df1 = df.set_index('time')
print(df1['2018-03'])
6. 保存到sqlite3數據庫
# Write df3 into a local SQLite database, replacing any previous table.
# FIX: removed a stray fused "app" token after the to_sql call and a
# duplicated copy of this same snippet left over from page extraction.
import sqlite3

with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df3.to_sql('gzccnews05', con=db, if_exists='replace')
7. 從sqlite3讀數據
# Read the table back out of SQLite into a fresh DataFrame and show it.
# FIX: removed a stray fused "url" token after print(df2) and a
# duplicated copy of this same snippet left over from page extraction.
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    df2 = pandas.read_sql_query('SELECT * FROM gzccnews05', con=db)
print(df2)
8. df保存到mysql數據庫
安裝SQLAlchemy
安裝PyMySQL
MySQL裏建立數據庫:create database gzccnews charset utf8;
# Save df into MySQL through SQLAlchemy with the PyMySQL driver.
import pymysql
from sqlalchemy import create_engine

# Lazy engine: no connection is made until to_sql() runs.
conn = create_engine('mysql+pymysql://root:root@localhost:3306/gzccnews?charset=utf8')
# FIX: pandas.io.sql.to_sql is a private/legacy entry point; the public,
# documented API is the DataFrame method with the same arguments.
df.to_sql('gzccnews', con=conn, if_exists='replace')
MySQL裏查看已保存了數據。(經過MySQL Client或Navicat。)
# The same MySQL export, shown as one self-contained runnable snippet.
import pymysql
from sqlalchemy import create_engine

conn = create_engine('mysql+pymysql://root:root@localhost:3306/gzccnews?charset=utf8')
pandas.io.sql.to_sql(df, 'gzccnews', con=conn, if_exists='replace')