1. Save the body text of a news article to a text file
f=open("content.text",‘a’,encoding='utf-8') f.write(content) f.close()
2. Structure the news data as a list of dictionaries
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime

# Parse one article page into a dictionary of its metadata.
def gzcc_content_info(content_url):
    content_info = {}
    resp = requests.get(content_url)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')
    # Capture patterns for the fields in the article's info line; the trailing
    # bracketed characters loosely mark where the next label begins.
    match_str = {'author': r'作者:(.*)\s+[审核]?',
                 'examine': r'审核:(.*)\s+[来源]?',
                 'source': r'来源:(.*)\s+[摄影]?',
                 'photography': r'摄影:(.*)\s+[点击]'}
    remarks = soup.select('.show-info')[0].text
    for i in match_str:
        if re.match('.*' + match_str[i], remarks):
            content_info[i] = re.search(match_str[i], remarks).group(1).split('\xa0')[0]
        else:
            content_info[i] = ' '
    time = re.search(r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}', remarks).group()
    content_info['time'] = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
    content_info['title'] = soup.select('.show-title')[0].text
    content_info['url'] = content_url
    # gzcc_content_clicks (the click-count helper) is defined separately;
    # see the sketch below.
    content_info['clicks'] = gzcc_content_clicks(content_url)
    return content_info

# Collect the metadata dictionary of every article linked from one list page.
def gzcc_list_page(page_url):
    page_news = []
    res = requests.get(page_url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    news_list = soup.select('.news-list')[0]
    news_point = news_list.select('li')
    for i in news_point:
        a = i.select('a')[0]['href']
        page_news.append(gzcc_content_info(a))
    return page_news

all_news = []
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
# The second-to-last pager link holds the total number of pages.
n = int(soup.select('#pages')[0].select('a')[-2].text)
all_news.extend(gzcc_list_page(url))
for i in range(2, n + 1):  # range(2, n) would skip the last page
    all_news.extend(gzcc_list_page('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))
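The code above calls gzcc_content_clicks, which this section does not define. For completeness, a minimal sketch of the usual approach: the counter endpoint on oa.gzcc.cn and its jQuery-style response format below are assumptions, not something confirmed by this section.

import re
import requests

# Sketch of the click-count helper. The endpoint URL and the response
# format are assumptions about how the site serves click counts.
def gzcc_content_clicks(content_url):
    # e.g. .../xiaoyuanxinwen_0404/9183.html -> article id 9183
    news_id = re.search(r'_(.*).html', content_url).group(1).split('/')[-1]
    clicks_url = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(news_id)
    resp_text = requests.get(clicks_url).text
    # Assumed response: a jQuery snippet such as $('#hits').html('1024');
    return int(re.search(r"hits'\)\.html\('(\d+)'\);", resp_text).group(1))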
3. Install pandas and use pandas.DataFrame(all_news) to create a DataFrame object df
import pandas

df = pandas.DataFrame(all_news)
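To confirm the DataFrame came out as expected, a couple of standard pandas checks (generic pandas usage, not from the original post):

print(df.shape)   # (number of articles, number of fields)
print(df.dtypes)  # 'time' should come out as datetime64[ns]
print(df.head())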
4. Use df to save the extracted data to a CSV or Excel file.
df.to_excel('news.xlsx')
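Note that to_excel requires an Excel writer engine such as openpyxl to be installed. Writing CSV avoids that dependency; if the file will be opened in Excel, the utf_8_sig encoding adds a BOM so the Chinese text displays correctly (the file name here is arbitrary):

df.to_csv('news.csv', encoding='utf_8_sig')  # BOM lets Excel detect UTF-8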
5. Use the functions and methods provided by pandas for data analysis:
# First 6 rows of the clicks, title and source columns
df[['clicks', 'title', 'source']].head(6)

# Articles with more than 3000 clicks whose source is 学校综合办
df[(df['clicks'] > 3000) & (df['source'] == '学校综合办')]

# Articles whose source is in a given list
news_info = ['国际学院', '学生工作处']
df[df['source'].isin(news_info)]
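Because 'time' was parsed into datetime objects in step 2, the same DataFrame also supports ranking and time-based selection. A short illustrative sketch; the thresholds and groupings are examples, not from the original post:

# Ten most-clicked articles
df.sort_values('clicks', ascending=False).head(10)[['title', 'clicks']]

# Articles published in a given year
df[df['time'].dt.year == 2018][['time', 'title']]

# Average clicks per source
df.groupby('source')['clicks'].mean()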