KFold, GroupKFold, StratifiedKFold,
LeaveOneGroupOut, LeavePGroupsOut, LeaveOneOut, LeavePOut,
ShuffleSplit, GroupShuffleSplit, StratifiedShuffleSplit

Workflow: instantiate the splitter (cross-validator) -> iterate over the generated splits with .split(), as sketched below.
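A minimal sketch of that workflow, assuming a toy LogisticRegression model (any scikit-learn estimator works); the instantiated splitter can be iterated directly or handed to cross_val_score via its cv parameter:

# Sketch of the workflow: instantiate a splitter, then iterate its splits.
# LogisticRegression and the toy data here are illustrative assumptions,
# not part of the original post.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score

X = np.arange(20).reshape(10, 2)   # toy feature matrix (10 samples, 2 features)
y = np.array([0, 1] * 5)           # toy binary labels

kf = KFold(n_splits=5)             # 1. instantiate the splitter
clf = LogisticRegression()         # 2. instantiate the classifier
# 3a. iterate the splits yourself ...
for train_index, test_index in kf.split(X):
    clf.fit(X[train_index], y[train_index])
    print(clf.score(X[test_index], y[test_index]))
# 3b. ... or pass the splitter to cross_val_score
print(cross_val_score(clf, X, y, cv=kf))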
# KFold
import numpy as np
from sklearn.model_selection import KFold

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 3, 4, 5, 6])
kf = KFold(n_splits=2)  # how many folds to split into
# kf.get_n_splits(X)    # query the number of folds
print(kf)
for train_index, test_index in kf.split(X):
    print("Train Index:", train_index, ",Test Index:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # print(X_train, X_test, y_train, y_test)
# GroupKFold: K-fold variant with non-overlapping groups; samples that share a
# group label never appear in both the training fold and the test fold
import numpy as np
from sklearn.model_selection import GroupKFold

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 3, 4, 5, 6])
groups = np.array([1, 2, 3, 4, 5, 6])
group_kfold = GroupKFold(n_splits=2)
group_kfold.get_n_splits(X, y, groups)
print(group_kfold)
for train_index, test_index in group_kfold.split(X, y, groups):
    print("Train Index:", train_index, ",Test Index:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # print(X_train, X_test, y_train, y_test)

# GroupKFold(n_splits=2)
# Train Index: [0 2 4] ,Test Index: [1 3 5]
# Train Index: [1 3 5] ,Test Index: [0 2 4]
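LeaveOneGroupOut and LeavePGroupsOut from the list at the top follow the same group-based idea but are not demonstrated above. A minimal sketch (the three-group labelling is chosen here for illustration and is not from the original examples): each split holds out all samples of one group, or of P groups, as the test set.

# LeaveOneGroupOut / LeavePGroupsOut: hold out every sample belonging to one
# group (or to a combination of P groups) as the test set.
# The group labels below are illustrative assumptions.
import numpy as np
from sklearn.model_selection import LeaveOneGroupOut, LeavePGroupsOut

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 3, 4, 5, 6])
groups = np.array([1, 1, 2, 2, 3, 3])

logo = LeaveOneGroupOut()  # one split per group
for train_index, test_index in logo.split(X, y, groups):
    print("Train Index:", train_index, ",Test Index:", test_index)

lpgo = LeavePGroupsOut(n_groups=2)  # one split per combination of 2 groups
for train_index, test_index in lpgo.split(X, y, groups):
    print("Train Index:", train_index, ",Test Index:", test_index)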
# StratifiedKFold: keeps the class proportions (as far as possible) the same in every fold
import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 1, 1, 2, 2, 2])
skf = StratifiedKFold(n_splits=3)
skf.get_n_splits(X, y)
print(skf)
for train_index, test_index in skf.split(X, y):
    print("Train Index:", train_index, ",Test Index:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # print(X_train, X_test, y_train, y_test)

# StratifiedKFold(n_splits=3, random_state=None, shuffle=False)
# Train Index: [1 2 4 5] ,Test Index: [0 3]
# Train Index: [0 2 3 5] ,Test Index: [1 4]
# Train Index: [0 1 3 4] ,Test Index: [2 5]
# LeaveOneOut: leave exactly one sample out as the test set
import numpy as np
from sklearn.model_selection import LeaveOneOut

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 3, 4, 5, 6])
loo = LeaveOneOut()
loo.get_n_splits(X)
print(loo)
for train_index, test_index in loo.split(X, y):
    print("Train Index:", train_index, ",Test Index:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # print(X_train, X_test, y_train, y_test)

# LeaveOneOut()
# Train Index: [1 2 3 4 5] ,Test Index: [0]
# Train Index: [0 2 3 4 5] ,Test Index: [1]
# Train Index: [0 1 3 4 5] ,Test Index: [2]
# Train Index: [0 1 2 4 5] ,Test Index: [3]
# Train Index: [0 1 2 3 5] ,Test Index: [4]
# Train Index: [0 1 2 3 4] ,Test Index: [5]
# LeavePOut: leave P samples out as the test set (one split per combination, C(n, p) in total)
import numpy as np
from sklearn.model_selection import LeavePOut

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 3, 4, 5, 6])
lpo = LeavePOut(p=3)
lpo.get_n_splits(X)
print(lpo)
for train_index, test_index in lpo.split(X, y):
    print("Train Index:", train_index, ",Test Index:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # print(X_train, X_test, y_train, y_test)

# LeavePOut(p=3)
# Train Index: [3 4 5] ,Test Index: [0 1 2]
# Train Index: [2 4 5] ,Test Index: [0 1 3]
# Train Index: [2 3 5] ,Test Index: [0 1 4]
# Train Index: [2 3 4] ,Test Index: [0 1 5]
# Train Index: [1 4 5] ,Test Index: [0 2 3]
# Train Index: [1 3 5] ,Test Index: [0 2 4]
# Train Index: [1 3 4] ,Test Index: [0 2 5]
# Train Index: [1 2 5] ,Test Index: [0 3 4]
# Train Index: [1 2 4] ,Test Index: [0 3 5]
# Train Index: [1 2 3] ,Test Index: [0 4 5]
# Train Index: [0 4 5] ,Test Index: [1 2 3]
# Train Index: [0 3 5] ,Test Index: [1 2 4]
# Train Index: [0 3 4] ,Test Index: [1 2 5]
# Train Index: [0 2 5] ,Test Index: [1 3 4]
# Train Index: [0 2 4] ,Test Index: [1 3 5]
# Train Index: [0 2 3] ,Test Index: [1 4 5]
# Train Index: [0 1 5] ,Test Index: [2 3 4]
# Train Index: [0 1 4] ,Test Index: [2 3 5]
# Train Index: [0 1 3] ,Test Index: [2 4 5]
# Train Index: [0 1 2] ,Test Index: [3 4 5]
# ShuffleSplit: shuffles the data and then draws random train/test splits;
# train_size and test_size are user-specified, and their sum may be less than 1
# (in which case some samples fall into neither set)
import numpy as np
from sklearn.model_selection import ShuffleSplit

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 3, 4, 5, 6])
rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
rs.get_n_splits(X)
print(rs)
for train_index, test_index in rs.split(X, y):
    print("Train Index:", train_index, ",Test Index:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # print(X_train, X_test, y_train, y_test)

print("==============================")
rs = ShuffleSplit(n_splits=3, train_size=.5, test_size=.25, random_state=0)
rs.get_n_splits(X)
print(rs)
for train_index, test_index in rs.split(X, y):
    print("Train Index:", train_index, ",Test Index:", test_index)

# ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
# Train Index: [1 3 0 4] ,Test Index: [5 2]
# Train Index: [4 0 2 5] ,Test Index: [1 3]
# Train Index: [1 2 4 0] ,Test Index: [3 5]
# ==============================
# ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=0.5)
# Train Index: [1 3 0] ,Test Index: [5 2]
# Train Index: [4 0 2] ,Test Index: [1 3]
# Train Index: [1 2 4] ,Test Index: [3 5]
# StratifiedShuffleSplit: like ShuffleSplit, it shuffles the data and draws random
# train/test splits whose sizes may sum to less than 1, but it additionally keeps
# the class proportions the same in the training and test sets
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 1, 2, 1, 2])
sss = StratifiedShuffleSplit(n_splits=3, test_size=.5, random_state=0)
sss.get_n_splits(X, y)
print(sss)
for train_index, test_index in sss.split(X, y):
    print("Train Index:", train_index, ",Test Index:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # print(X_train, X_test, y_train, y_test)

# StratifiedShuffleSplit(n_splits=3, random_state=0, test_size=0.5, train_size=None)
# Train Index: [5 4 1] ,Test Index: [3 2 0]
# Train Index: [5 2 3] ,Test Index: [0 4 1]
# Train Index: [5 0 4] ,Test Index: [3 1 2]
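GroupShuffleSplit, also listed at the top but not shown above, combines both ideas: random ShuffleSplit-style draws in which whole groups, rather than individual samples, land on one side of the split. A minimal sketch (the group labels are illustrative assumptions):

# GroupShuffleSplit: random train/test splits drawn at the group level;
# all samples of a group end up on the same side of the split.
# The group labels below are illustrative, not from the original post.
import numpy as np
from sklearn.model_selection import GroupShuffleSplit

X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
y = np.array([1, 2, 3, 4, 5, 6])
groups = np.array([1, 1, 2, 2, 3, 3])

gss = GroupShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
for train_index, test_index in gss.split(X, y, groups):
    print("Train Index:", train_index, ",Test Index:", test_index)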