1. Get the news details from a news URL: returns a dict — anews
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re

def click(url):
    # Pull the numeric news id out of the URL and query the click-count API.
    id = re.findall(r'(\d{1,5})', url)[-1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(id)
    resClick = requests.get(clickUrl)
    # The count is the quoted number after the last '.html' in the response.
    newsClick = int(resClick.text.split('.html')[-1].lstrip("('").rstrip("');"))
    return newsClick

def newsdt(showinfo):
    # showinfo starts with a label like '发布时间:2019-04-03 11:22:33';
    # split off the date and time and parse them into a datetime.
    newsDate = showinfo.split()[0].split(':')[1]
    newsTime = showinfo.split()[1]
    newsDT = newsDate + ' ' + newsTime
    dt = datetime.strptime(newsDT, '%Y-%m-%d %H:%M:%S')
    return dt

def anews(url):
    # Fetch one news page and collect its title, publish time, and click count.
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsDetail['newsTitle'] = soup.select('.show-title')[0].text
    showinfo = soup.select('.show-info')[0].text
    newsDetail['newsDT'] = newsdt(showinfo)
    newsDetail['newsClick'] = click(url)   # count the page passed in, not a global
    return newsDetail

newsUrl = 'http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0403/11147.html'
anews(newsUrl)

res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
for news in soup.select('li'):
    if len(news.select('.news-list-title')) > 0:
        newsUrl = news.select('a')[0]['href']
        print(anews(newsUrl))
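For reference, click() assumes the count API answers with a small jQuery snippet whose last call ends in .html('<count>'). The exact response format is an assumption, not confirmed by the source; the sample string below only illustrates how the parsing line recovers the number.

# Hypothetical sample of the count API's response body (assumed format):
sample = "$('#hits').html('1290');"
# click() isolates the quoted number after the final '.html':
print(int(sample.split('.html')[-1].lstrip("('").rstrip("');")))   # 1290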
2. Get the news URLs from a list page's URL: list append(dict) — alist
listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(listUrl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
for n in soup.select('li'):
    if n.select('.news-list-title'):      # only <li> items that are real news entries
        print(n.select('a')[0]['href'])
3. Generate the URLs of all the list pages and fetch all the news: list extend(list) — allnews
* Each student crawls the 10 list pages starting from the last digits of their student ID.
def alist(url):
    res = requests.get(url)               # fetch the list page that was passed in
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsDesc = news.select('.news-list-description')[0].text
            newsDict = anews(newsUrl)     # reuse the detail scraper from step 1
            newsDict['description'] = newsDesc
            newsList.append(newsDict)
    return newsList

listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
alist(listUrl)

allnews = []
for i in range(103, 113):                 # 10 consecutive list pages (here 103-112)
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
len(allnews)
4. Set a reasonable crawl interval
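No interval code appears in the source; a minimal sketch of how one might space out the requests, assuming a random pause of up to three seconds between list pages (the loop mirrors step 3):

import time
import random

allnews = []
for i in range(103, 113):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
    time.sleep(random.random() * 3)   # sleep 0-3 s so the server is not hammered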
5. Use pandas for simple data processing and save the results
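The source never shows the newsdf used below being built; a minimal sketch, assuming allnews is the list of dicts from step 3 (the newsClick column comes from anews):

import pandas as pd

newsdf = pd.DataFrame(allnews)   # one row per news dict
print(newsdf.sort_values(by='newsClick', ascending=False).head())   # most-clicked first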
Save to a CSV or Excel file
newsdf.to_csv(r'C:\Users\Administrator\Desktop\hjy.csv')
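For the Excel variant mentioned above, a sketch assuming an Excel writer engine such as openpyxl is installed; the desktop path is only illustrative:

# Assumption: openpyxl (or another Excel engine) is available.
newsdf.to_excel(r'C:\Users\Administrator\Desktop\hjy.xlsx')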
Save to a database
import sqlite3

# Write the DataFrame into a SQLite table named news_hjy.
with sqlite3.connect('news_hjy.sqlite') as db:
    newsdf.to_sql('news_hjy', db)
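To check the write, the table can be read back through the same connection; a minimal sketch (the query is an illustration, not from the source):

import sqlite3
import pandas as pd

with sqlite3.connect('news_hjy.sqlite') as db:
    df2 = pd.read_sql_query('SELECT * FROM news_hjy', db)
print(df2.shape)   # rows x columns actually stored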