Machine Learning Reference: Machine Learning with python + sklearn + kaggle

Implementing a weather forecast with python + sklearn (machine learning): Data
Implementing a weather forecast with python + sklearn (machine learning): Preparation
Implementing a weather forecast with python + sklearn (machine learning): Model and usage
Correction: the URL-building line in the original version of this post,

```python
# URL for scraping the data
url = "http://www.meteomanz.com/sy2?l=1&cou=2250&ind=59287&d1=" + str(week_ago.day).zfill(2) + "&m1=" + str(week_ago.month).zfill(2) + "&y1=" + str(week_ago.month) + "&d2=" + str(week_pre.day - years[0]).zfill(2) + "&m2=" + str(week_pre.month).zfill(2) + "&y2=" + str(week_pre.year - years[1])
```

has been corrected to:

```python
# URL for scraping the data
url = "http://www.meteomanz.com/sy2?l=1&cou=2250&ind=59287&d1=" + str(week_ago.day).zfill(2) + "&m1=" + str(
    week_ago.month).zfill(2) + "&y1=" + str(week_ago.year - years[0]) + "&d2=" + str(week_pre.day).zfill(2) + "&m2=" + str(week_pre.month).zfill(2) + "&y2=" + str(week_pre.year - years[1])
```
In the previous tutorial we already worked out the URL scheme of the data source, so this installment covers how to fetch the data with a scraper and the data-preprocessing stage of machine learning.

For the scraping itself you can also refer to an earlier article of mine.
First, we mainly want to scrape the range from half a month before this day last year up to this day last year. From the URL scheme derived in the previous article, we get (PS: the real link contains no line breaks):

```
http://www.meteomanz.com/sy2?l=1&cou=2250&ind=59287
  &d1=<day, half a month before this day last year>
  &m1=<month, half a month before this day last year>
  &y1=<last year>
  &d2=<today's day>
  &m2=<today's month>
  &y2=<this year>
```
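For example, filled in for the range 17/11/2019 to 07/12/2019 (the range behind the sample data shown later in this post), the URL would be:

```
http://www.meteomanz.com/sy2?l=1&cou=2250&ind=59287&d1=17&m1=11&y1=2019&d2=07&m2=12&y2=2019
```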
Why last year, and why half a month? Because last year's weather is more similar to current conditions than the year before or anything earlier, which reduces error. Half a month rather than one week because more data reduces error; not a whole month because of the site's limits, and in experiments a month actually added a small amount of error. So in the end: last year's data, over half a month.
If we only ever forecast today, the URL above could be filled in by hand, but to avoid manual entry we use Python's datetime library, as follows:
```python
import datetime as DT

# Current date
today = DT.datetime.now()
# Date b[0] days ago
week_ago = (today - DT.timedelta(days=b[0])).date()
# Date b[1] days ahead
week_pre = (today + DT.timedelta(days=b[1])).date()
```
Passing b = [15, 0], we get the date half a month ago in week_ago and today's date in week_pre.
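A quick check of the two dates (output assumes a hypothetical run date of 2020-12-16):

```python
import datetime as DT

b = [15, 0]
today = DT.datetime.now()
week_ago = (today - DT.timedelta(days=b[0])).date()
week_pre = (today + DT.timedelta(days=b[1])).date()
# If run on 2020-12-16 this prints: 2020-12-01 2020-12-16
print(week_ago, week_pre)
```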
So this single line builds the URL we need:
```python
# URL for scraping the data
url = "http://www.meteomanz.com/sy2?l=1&cou=2250&ind=59287&d1=" + str(week_ago.day).zfill(2) + "&m1=" + str(
    week_ago.month).zfill(2) + "&y1=" + str(week_ago.year - years[0]) + "&d2=" + str(week_pre.day).zfill(2) + "&m2=" + str(week_pre.month).zfill(2) + "&y2=" + str(week_pre.year - years[1])
```
Here .zfill(2) pads the string to two digits: 1 becomes 01, while 12 stays 12.
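In other words:

```python
print(str(1).zfill(2))   # prints 01
print(str(12).zfill(2))  # prints 12
```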
With the URL in hand, the next step is for the scraper to fetch the page and then parse its elements to pull out the data.
First, the scraper itself. This part is simple; it is a small GetData class:
```python
# -*- coding: utf-8 -*-
# @Time: 2020/12/16
# @Author: Eritque arcus
# @File: GetData.py
# Purpose: fetch the data
import urllib3


class GetData:
    url = ""
    headers = ""

    def __init__(self, url, header=""):
        """
        :param url: URL to fetch
        :param header: request headers; a default set is built in
        """
        self.url = url
        if header == "":
            self.headers = {
                'Connection': 'Keep-Alive',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,'
                          '*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Accept-Encoding': 'gzip, deflate',
                'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, '
                              'like Gecko) Chrome/87.0.4280.66 Mobile Safari/537.36 ',
                'Host': 'www.meteomanz.com'
            }
        else:
            self.headers = header

    def Get(self):
        """
        :return: the content of the page at the URL
        """
        http = urllib3.PoolManager()
        return http.request('GET', self.url, headers=self.headers).data
```
This uses the urllib3 library with a GET request. headers is the request header set: press F12 to open the browser's developer tools, go to the Network tab, click any request, and scroll down to find the headers; you can also just reuse mine. Request headers are part of the HTTP protocol; search around if you want to learn more.
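GetData relies on urllib3's default behavior. If the site is slow or drops connections, urllib3 can also be given a timeout and retry policy; this is an optional sketch, not part of the original class:

```python
import urllib3

# Fail fast instead of hanging, and retry transient network errors
http = urllib3.PoolManager(
    timeout=urllib3.Timeout(connect=5.0, read=10.0),
    retries=urllib3.Retry(total=3, backoff_factor=0.5)
)
# resp.data holds the raw page bytes, same as GetData.Get() returns
resp = http.request("GET", "http://www.meteomanz.com/sy2?l=1&cou=2250&ind=59287")
```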
Parsing the page is done with the BeautifulSoup library:
```python
g = GetData(url).Get()
# Parse the page with BeautifulSoup
soup = BeautifulSoup(g, "html5lib")
# Grab the <tbody> contents
tb = soup.find(name="tbody")
# Grab the <tr> rows
past_tr = tb.find_all(name="tr")
for tr in past_tr:
    # Grab every <td> cell inside the row
    text = tr.find_all(name="td")
    flag = False
    for i in range(0, len(text)):
        if i == 0:
            text[i] = text[i].a.string
            # Site bug: a cross-month request returns a "day 0" row for each
            # month, e.g. 00/11/2020 (day/month/year), which is entirely empty
            # because the date does not exist, so drop it manually
            if "00/" in text[i]:
                flag = True
        elif i == 8:
            # Strip the "/8"; it is just the page's display format
            text[i] = text[i].string.replace("/8", "")
        elif i == 5:
            # Strip the "Hpa" unit
            text[i] = text[i].string.replace(" Hpa", "")
        elif i == 6:
            # Use a regex to strip the bracketed part of the wind direction
            text[i] = re.sub(u"[º(.*?|N|W|E|S)]", "", text[i].string)
        else:
            # Take the element's text content
            text[i] = text[i].string
        # Replace every missing value with 2 (a crude approach)
        # Doing this gives MAE = 3.6021
        text[i] = text[i].replace("-", "2")
        text[i] = text[i].replace("Tr", "2")
```
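One subtlety in the wind-direction cleanup: the bracket expression is a character class, so it deletes each listed character individually (º, the parentheses, the compass letters N/E/S/W, plus ., *, ? and |) rather than matching the group pattern (.*?). Assuming the site renders wind direction like 331º (NW):

```python
import re

# The character class removes each listed character one at a time,
# leaving only the numeric bearing (plus whitespace, hence the strip)
print(re.sub(u"[º(.*?|N|W|E|S)]", "", "331º (NW)").strip())  # prints 331
```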
If anything is unclear, ask in the comments and I will answer. Finally, each parsed row is written out to a CSV file with the csv library:
```python
import csv

# Create the file object (newline='' stops csv from inserting blank lines on Windows)
f = open(c, 'w', encoding='utf-8', newline='')
# Build a csv writer on top of the file object
csv_writer = csv.writer(f)
# Write a row: the text list
csv_writer.writerow(text)
# Close the file
f.close()
```
The complete Write.py:

```python
# -*- coding: utf-8 -*-
# @Time: 2020/12/16
# @Author: Eritque arcus
# @File: Write.py
import re
from bs4 import BeautifulSoup
from GetData import GetData
import datetime as DT
import csv


def a(t):
    return t.replace(" - ", "0")


# Purpose: write the csv
def Write(years, b, c):
    """
    :param years: [years between the start date and now, years between the end date and now]
    :param b: [days between the start date and today, days between the end date and today]
    :param c: csv file name
    :return: None
    """
    # 1. Create the file object
    f = open(c, 'w', encoding='utf-8', newline='')
    # 2. Build a csv writer on top of the file object
    csv_writer = csv.writer(f)
    # 3. Write the header row
    # , "negAve", "negMax", "negMin"
    csv_writer.writerow(["Time", "Ave_t", "Max_t", "Min_t", "Prec", "SLpress", "Winddir", "Windsp", "Cloud"])
    # Current date
    today = DT.datetime.now()
    # Date 20 days ago
    week_ago = (today - DT.timedelta(days=b[0])).date()
    # 20 days ahead
    week_pre = (today + DT.timedelta(days=b[1])).date()
    # City id: Guangzhou 59287, Qingdao 54857
    id = "59287"
    # URL for scraping the data
    url = "http://www.meteomanz.com/sy2?l=1&cou=2250&ind=" + id + "&d1=" + str(week_ago.day).zfill(2) + "&m1=" + str(
        week_ago.month).zfill(2) + "&y1=" + str(week_ago.year - years[0]) + "&d2=" + str(week_pre.day).zfill(
        2) + "&m2=" + str(week_pre.month).zfill(2) + "&y2=" + str(week_pre.year - years[1])
    # Show the URL the data set is fetched from
    print(url)
    g = GetData(url).Get()
    # Parse the page with BeautifulSoup
    soup = BeautifulSoup(g, "html5lib")
    # Grab the <tbody> contents
    tb = soup.find(name="tbody")
    # Grab the <tr> rows
    past_tr = tb.find_all(name="tr")
    for tr in past_tr:
        # Grab every <td> cell inside the row
        text = tr.find_all(name="td")
        flag = False
        negA = negMax = negMin = False
        for i in range(0, len(text)):
            if i == 0:
                text[i] = text[i].a.string
                # Site bug: returns a "day 0" row for each month, e.g. 00/11/2020, so drop it
                if "00/" in text[i]:
                    flag = True
            elif i == 8:
                # Strip the "/8"; it is just the page's display format
                text[i] = text[i].string.replace("/8", "")
            elif i == 5:
                # Strip the unit
                text[i] = text[i].string.replace(" Hpa", "")
            elif i == 6:
                # Strip the bracketed part of the wind direction
                text[i] = re.sub(u"[º(.*?|N|W|E|S)]", "", text[i].string)
            else:
                # Take the element's text content
                text[i] = text[i].string
            # Replace every missing value with 2 (a crude approach)
            # Doing this gives MAE = 3.6021
            text[i] = "2" if text[i] == "-" else text[i]
            text[i] = "2" if text[i] == "Tr" else text[i]
        text = text[0:9]
        # ext += [str(int(negA)), str(int(negMax)), str(int(negMin))]
        # 4. Write the row to the csv file
        if not flag:
            csv_writer.writerow(text)
    # 5. Close the file
    f.close()
```
GetData.py stays exactly as shown earlier in this post.
After that, one call is enough to fetch the weather data. For example, the following grabs the data from 20 days before this day last year up to this day last year:
```python
# Use recent years' data as the training set
# e.g. [1, 1], [20, 0] uses the data from 20 days before today in 2019 up to today in 2019
# Write to csv
Write([1, 1], [20, 0], "weather_train_train.csv")
```
The result, weather_train_train.csv:

```
Time,Ave_t,Max_t,Min_t,Prec,SLpress,Winddir,Windsp,Cloud
07/12/2019,14.8,20.8,8.8,0.0,1026.3,331,11,0
06/12/2019,15.2,19.8,10.7,0.0,1026.6,344,15,0
05/12/2019,14.5,20.4,8.6,2,1026.2,346,13,8
04/12/2019,13.8,20.4,7.1,0.0,1024.7,335,16,2
03/12/2019,13.0,18.9,7.1,0.0,1024.8,330,10,0
02/12/2019,18.2,24.9,11.5,0.0,1024.8,347,18,3
01/12/2019,18.1,24.9,11.4,0.0,1020.9,332,16,1
30/11/2019,17.5,23.6,11.4,0.0,1020.5,352,8,3
29/11/2019,15.8,20.1,11.5,0.0,1023.6,349,11,4
28/11/2019,20.4,27.1,13.8,0.0,1024.5,337,19,3
27/11/2019,21.9,27.1,16.6,0.0,1021.3,336,12,0
26/11/2019,22.2,28.4,16.1,0.0,1021.1,356,6,6
25/11/2019,22.2,29.3,15.2,0.0,1020.8,344,13,3
24/11/2019,21.4,29.3,13.6,0.0,1018.5,346,5,0
23/11/2019,20.7,28.4,13.0,0.0,1017.2,352,5,1
22/11/2019,19.6,27.6,11.6,0.0,1017.3,331,6,0
21/11/2019,18.4,25.1,11.6,0.0,1019.1,323,9,1
20/11/2019,18.3,24.2,12.4,0.0,1020.3,338,7,0
19/11/2019,19.1,25.4,12.8,0.0,1020.5,342,11,0
18/11/2019,22.2,28.8,15.7,0.0,1018.8,342,17,0
17/11/2019,22.2,28.8,15.7,0.0,1015.2,358,7,3
```
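As a quick sanity check, the file loads straight into pandas. One caveat worth noting about the data format: the dates are day-first (DD/MM/YYYY), so passing dayfirst=True avoids pandas guessing month-first (the ProcessData.py below uses parse_dates=True alone):

```python
import pandas as pd

# The scraped dates are DD/MM/YYYY, so dayfirst=True parses e.g. 07/12/2019 as Dec 7
df = pd.read_csv("weather_train_train.csv", index_col="Time",
                 parse_dates=True, dayfirst=True)
print(df.head())
```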
Before training on the data above, we still need some preprocessing. The scraped data can be incomplete, in which case we either drop the affected column or fill in the missing values with the column mean or some other statistic.

Since I already decided to replace every missing entry in the data with 2, the alternatives below are only listed (commented out) rather than used.
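For reference, if you did want to fill gaps by imputation instead of hard-coding 2, sklearn's SimpleImputer supports several strategies; a minimal sketch, illustrative only and not part of this project's pipeline:

```python
from sklearn.impute import SimpleImputer
import numpy as np

# Toy data with one missing value in column 0
data = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]])

for strategy in ("mean", "median", "most_frequent"):
    imp = SimpleImputer(strategy=strategy)
    # Print the value each strategy fills in for the NaN
    print(strategy, imp.fit_transform(data)[1][0])
```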
Create a new ProcessData.py and define a ProcessData function in it to produce the data sets:
```python
# -*- coding: utf-8 -*-
# @Time: 2020/12/16
# @Author: Eritque arcus
# @File: ProcessData.py
from Write import Write
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
import seaborn as sns
import matplotlib.pyplot as plt


# Purpose: data preprocessing
def ProcessData():
    """
    :return: [X_train training data,
              X_valid validation split of the training data,
              y_train training labels,
              y_valid validation split of the training labels,
              imputed_X_test prediction data]
    """
    # Use recent years' data as the training set
    # e.g. [1, 1], [20, 0] uses the data from 20 days before today in 2019 up to today in 2019
    # Write to csv
    Write([1, 1], [20, 0], "weather_train_train.csv")
    Write([1, 1], [0, 20], "weather_train_valid.csv")
    Write([0, 0], [20, 0], "weather_test.csv")
    X_test = pd.read_csv("weather_test.csv", index_col="Time", parse_dates=True)
    # Read the training and validation sets
    X = pd.read_csv("weather_train_train.csv", index_col="Time", parse_dates=True)
    y = pd.read_csv("weather_train_valid.csv", index_col="Time", parse_dates=True)
    # Dropping every fully missing column raised the MAE to 3.7, so it was removed
    # dxtcol = [col for col in X_test.columns
    #           if X_test[col].isnull().all()]
    # dxcol = [col for col in X.columns
    #          if X[col].isnull().all()]
    # dycol = [col for col in y.columns
    #          if y[col].isnull().all()]
    # for a1 in [dxtcol, dxcol, dycol]:
    #     for a2 in a1:
    #         if a2 in X_test.columns:
    #             X_test = X_test.drop(a2, axis=1)
    #         if a2 in X.columns:
    #             X = X.drop(a2, axis=1)
    #         if a2 in y.columns:
    #             y = y.drop(a2, axis=1)
    # Normalization/standardization; not used because it cannot be undone afterwards
    # scaler = preprocessing.StandardScaler()
    # pars = [cols for cols in X.columns if cols != "Time"]
    # for data in [X, y, X_test]:
    #     for par in pars:
    #         data[par] = scaler.fit_transform(data[par].values.reshape(-1, 1))
    #         # temp = scaler.fit(data[par].values.reshape(-1, 1))
    #         # data[par] = scaler.fit_transform(data[par].values.reshape(-1, 1), temp)
    # Fill missing values (SimpleImputer defaults to the column mean); unsure how well it works
    my_imputer = SimpleImputer()
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
    imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
    imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
    imputed_X_train.columns = X_train.columns
    imputed_X_valid.columns = X_valid.columns
    imputed_y_train = pd.DataFrame(my_imputer.fit_transform(y_train))
    imputed_y_valid = pd.DataFrame(my_imputer.transform(y_valid))
    imputed_y_train.columns = y_train.columns
    imputed_y_valid.columns = y_valid.columns
    imputed_X_test = pd.DataFrame(my_imputer.fit_transform(X_test))
    # Plot line charts
    # sns.lineplot(data=X)
    # plt.show()
    # sns.lineplot(data=y)
    # plt.show()
    # sns.lineplot(data=X_test)
    # plt.show()
    # Return the split data sets
    return [imputed_X_train, imputed_X_valid, imputed_y_train, imputed_y_valid, imputed_X_test]
```
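With that in place, the next part of the series can pull everything with one call (the variable names here are just for illustration):

```python
from ProcessData import ProcessData

# Returns the imputed train/validation splits plus the prediction set
X_train, X_valid, y_train, y_valid, X_test = ProcessData()
print(X_train.shape, X_test.shape)
```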