泰坦尼克號

  數據分析領域有一個經典的入門題目:泰坦尼克號生還者預測。數據集能夠到 Kaggle 下載。

  

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


def preprocess_titanic(df):
    """Fill missing values and encode categorical columns of the Titanic frame.

    Mutates ``df`` in place and returns it:
    - Age: missing values replaced with the column median (``describe()``
      shows Age is the incomplete column).
    - Sex: 'male' -> 0, 'female' -> 1.
    - Embarked: missing -> 'S' (the most common port), then S/C/Q -> 0/1/2.
    """
    df['Age'] = df['Age'].fillna(df['Age'].median())
    # Convert string categories to numeric codes so models can use them.
    df.loc[df['Sex'] == 'male', 'Sex'] = 0
    df.loc[df['Sex'] == 'female', 'Sex'] = 1
    df['Embarked'] = df['Embarked'].fillna('S')
    df.loc[df['Embarked'] == 'S', 'Embarked'] = 0
    df.loc[df['Embarked'] == 'C', 'Embarked'] = 1
    df.loc[df['Embarked'] == 'Q', 'Embarked'] = 2
    return df


# Script entry: load the Kaggle training set (hard-coded local path, as in the
# original). Guarded so importing this module for its helper does not require
# the CSV file to exist; running it as a script behaves exactly as before.
if __name__ == '__main__':
    titanic = preprocess_titanic(pd.read_csv('D:/train.csv'))
    # print(titanic.head())
    # print(titanic.describe())



# Linear-regression baseline, evaluated with 3-fold cross-validation.
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20; the
# supported module is sklearn.model_selection.
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold

predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
alg = LinearRegression()
# Contiguous (unshuffled) folds, same as the old KFold(n, n_folds=3);
# random_state only applies when shuffle=True, so it is dropped here.
kf = KFold(n_splits=3)

predictions = []
for train_idx, test_idx in kf.split(titanic):
    # Fit on the training fold's X and y...
    train_predictors = titanic[predictors].iloc[train_idx, :]
    train_target = titanic['Survived'].iloc[train_idx]
    alg.fit(train_predictors, train_target)
    # ...then predict on the held-out fold.
    test_predictions = alg.predict(titanic[predictors].iloc[test_idx, :])
    predictions.append(test_predictions)

# Stitch the fold predictions back together and binarise at 0.5.
predictions = np.concatenate(predictions, axis=0)
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0
# BUG FIX: the original summed the predicted values at matching positions
# (sum(predictions[predictions == y])), which ignores every correctly
# predicted 0; accuracy is the fraction of matching positions.
accuracy = (predictions == titanic['Survived']).mean()
# print(accuracy)


# Logistic regression, scored with 3-fold cross-validation.
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20; use
# sklearn.model_selection.cross_val_score instead.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

alg = LogisticRegression(random_state=1)
scores = cross_val_score(alg, titanic[predictors], titanic['Survived'], cv=3)
# print(scores.mean())



# Random forest, scored with 3-fold cross-validation.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score

predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
alg = RandomForestClassifier(random_state=1, n_estimators=1000,
                             min_samples_split=8, min_samples_leaf=8)
# Contiguous folds, equivalent to the removed cross_validation.KFold API.
kf = KFold(n_splits=3)
scores = cross_val_score(alg, titanic[predictors], titanic['Survived'], cv=kf)
print(scores.mean())

# Notes on feature extraction (the crucial part of data mining):
# - extract as many candidate features as possible;
# - compare how different features actually perform;
# - the features used so far already exist in the raw data; in real data
#   mining there is often no ready-made feature and we must engineer our own.

# Engineered features: family size (siblings/spouses + parents/children
# aboard) and the length of the passenger's name.
titanic['FamilySize'] = titanic['SibSp'] + titanic['Parch']
titanic['NameLength'] = titanic['Name'].apply(len)

import re


def get_title(name):
    """Return the honorific ("Mr", "Mrs", ...) embedded in a passenger name.

    Titles appear as a space-preceded word terminated by a period, e.g.
    "Braund, Mr. Owen Harris" -> "Mr". Returns "" when no title is found.
    """
    # Raw string so the backslash reaches the regex engine untouched.
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ""


titles = titanic['Name'].apply(get_title)
# print(pd.value_counts(titles))

# Different social ranks carry different honorifics; map each title to a
# small integer code (titles of comparable rank share a code) so that
# models can consume the column.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5,
                 "Rev": 6, "Major": 7, "Col": 7, "Mlle": 8, "Mme": 8,
                 "Don": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10,
                 "Sir": 9, "Capt": 7, "Ms": 2}
for title, code in title_mapping.items():
    titles[titles == title] = code
# pd.value_counts() is deprecated (removed in pandas 3.0); the Series
# method produces the same counts.
print(titles.value_counts())
titanic['Title'] = titles

# Feature-importance idea (from the original write-up): to gauge how much a
# column matters, record the error rate with the data untouched (error1),
# then replace just that column with noise and measure again (error2); the
# gap between the two error rates reflects that feature's importance.
# Univariate feature selection: score every candidate feature against
# Survived with an ANOVA F-test and plot -log10(p-value), so taller bars
# mean stronger features. (`plt` is matplotlib.pyplot, imported at the top
# of the file; the deprecated `matplotlib.pylab` re-import is removed.)
from sklearn.feature_selection import SelectKBest, f_classif

predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked',
              'FamilySize', 'Title', 'NameLength']
selector = SelectKBest(f_classif, k=5)
selector.fit(titanic[predictors], titanic['Survived'])
scores = -np.log10(selector.pvalues_)
plt.bar(range(len(predictors)), scores)
plt.xticks(range(len(predictors)), predictors, rotation='vertical')
# plt.show()

# Based on the importance analysis above, retrain the random forest on the
# four strongest features only.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score

predictors = ['Pclass', 'Sex', 'Fare', 'Title']
alg = RandomForestClassifier(random_state=1, n_estimators=100,
                             min_samples_split=8, min_samples_leaf=8)
kf = KFold(n_splits=3)
scores = cross_val_score(alg, titanic[predictors], titanic['Survived'], cv=kf)
print(scores.mean())
# Ensemble: train several algorithms and average their predictions to
# guard against overfitting any single model.
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

algorithms = [
    [LogisticRegression(random_state=1), ['Pclass', 'Sex', 'Fare', 'Title']],
    [RandomForestClassifier(random_state=1, n_estimators=100,
                            min_samples_split=8, min_samples_leaf=8),
     ['Pclass', 'Sex', 'Fare', 'Title']],
]

# Contiguous 3-fold split, as in the earlier cross-validation runs.
kf = KFold(n_splits=3)
predictions = []
for train_idx, test_idx in kf.split(titanic):
    train_target = titanic['Survived'].iloc[train_idx]
    full_test_predictions = []
    # Fit each algorithm on the training fold and predict the test fold.
    for alg, predictors in algorithms:
        alg.fit(titanic[predictors].iloc[train_idx, :], train_target)
        # astype(float) guards against the object-dtype columns created by
        # the earlier in-place categorical encoding.
        test_predictions = alg.predict_proba(
            titanic[predictors].iloc[test_idx, :].astype(float))[:, 1]
        full_test_predictions.append(test_predictions)
    # BUG FIX: the original computed (p0 * 3 + p1) / 2, which is neither an
    # average (the stated intent) nor a valid probability — it can exceed 1.
    # Take the plain mean of the two models' predicted probabilities.
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    # Threshold at 0.5: above is class 1, at or below is class 0.
    test_predictions[test_predictions <= 0.5] = 0
    test_predictions[test_predictions > 0.5] = 1
    predictions.append(test_predictions)

predictions = np.concatenate(predictions, axis=0)
# BUG FIX: count matching positions rather than summing the matched
# prediction values (which ignores correctly predicted zeros).
accuracy = (predictions == titanic['Survived']).mean()
print(accuracy)
相關文章
相關標籤/搜索
本站公眾號
   歡迎關注本站公眾號,獲取更多信息