帶包實現:算法
# Package-based ANN: fit a single-hidden-layer network (nnet) to the
# sales data, then report the training accuracy and a confusion matrix.
rm(list = ls())
setwd("C:/Users/Administrator/Desktop/R語言與數據挖掘做業/實驗4-人工神經網絡")

library(nnet)

# Columns 2-5 of the CSV: three predictors plus the class label.
sales <- read.csv("sales_data.csv")[, 2:5]
colnames(sales) <- c("x1", "x2", "x3", "y")

# 6 hidden units, weight decay 5e-4, at most 1000 iterations.
fit <- nnet(y ~ ., data = sales, size = 6, decay = 5e-4, maxit = 1000)

# Predict the training set itself and print the fraction classified correctly.
fitted_class <- predict(fit, sales[, 1:3], type = "class")
(train_acc <- mean(fitted_class == sales$y))

# Confusion matrix: raw counts, then row-wise proportions.
table(sales$y, fitted_class)
prop.table(table(sales$y, fitted_class), 1)
2. 深刻理解BP人工神經網絡算法,並用R語言實現該算法
本身打出一個簡單的神經網絡學習
# Hand-written BP neural network: 4 inputs -> 5 hidden units -> 3 outputs,
# trained online (parameters updated after every sample) on a stratified
# train/test split of iris.
rm(list = ls())
# install.packages("sampling")
library(sampling)
setwd("C:/Users/Administrator/Desktop/R語言與數據挖掘做業/實驗4-人工神經網絡")
data("iris")

# Stratified sample: 3/5 of each Species for training
# (srswor = simple random sampling without replacement).
n <- round(3 / 5 * nrow(iris) / 3)
sub_train <- strata(iris, stratanames = ("Species"), size = rep(n, 3), method = "srswor")
head(sub_train)
data_train <- iris[sub_train$ID_unit, ]
data_test <- iris[-sub_train$ID_unit, ]
dim(data_train)
dim(data_test)
# Save the split.
write.csv(data_train, "./iris_data_train.csv")
write.csv(data_test, "./iris_data_test.csv")

feature_cols <- c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")

# FIX: record the TRAINING min/max and use it to scale BOTH sets. The
# original normalised the test set with the test set's own min/max, so the
# train and test features lived on different scales (train/test mismatch).
train_min <- sapply(data_train[feature_cols], min)
train_max <- sapply(data_train[feature_cols], max)
for (col in feature_cols) {
  data_train[[col]] <- (data_train[[col]] - train_min[col]) / (train_max[col] - train_min[col])
}

# Sigmoid activation.
f <- function(x) {
  1 / (1 + exp(-x))
}

global_time <- 100     # number of training epochs
learning_rate <- 1.8   # gradient step size

# Random init in [0, 1]: w1 is 4x5, hidden bias a1 is 1x5, w2 is 5x3,
# output bias a2 is 1x3. Net input of a layer is x %*% w - a.
w1 <- matrix(sample((0:100) / 100, size = 20), 4, 5)
a1 <- matrix(sample((0:100) / 100, size = 5), 1, 5)
w2 <- matrix(sample((0:100) / 100, size = 15), 5, 3)
a2 <- matrix(sample((0:100) / 100, size = 3), 1, 3)

# Forward pass: 1x4 input row -> 1x3 output activations.
forward <- function(x1) {
  f(f(x1 %*% w1 - a1) %*% w2 - a2)
}

# Online training loop.
for (time in seq_len(global_time)) {
  for (i in seq_len(nrow(data_train))) {
    x1 <- matrix(as.numeric(data_train[i, feature_cols]), 1, 4)  # 1x4 input
    x2 <- f(x1 %*% w1 - a1)  # 1x5 hidden activations
    x3 <- f(x2 %*% w2 - a2)  # 1x3 output activations

    # Soft one-hot target: 0.9 for the true class, 0.1 elsewhere.
    y <- matrix(0.1, 1, 3)
    if (data_train$Species[i] == "setosa") y[1] <- 0.9
    if (data_train$Species[i] == "versicolor") y[2] <- 0.9
    if (data_train$Species[i] == "virginica") y[3] <- 0.9

    # Output-layer error term: o * (1 - o) * (y - o), 1x3.
    delta_out <- x3 * (1 - x3) * (y - x3)
    # FIX: the hidden-layer error term must include the sigmoid derivative
    # x2 * (1 - x2); the original back-propagated only w2 %*% delta.
    # Computed BEFORE w2 is updated, as standard BP requires.
    delta_hid <- x2 * (1 - x2) * (delta_out %*% t(w2))  # 1x5

    # Weight updates: outer product of layer input and error term.
    w2 <- w2 + learning_rate * (t(x2) %*% delta_out)
    w1 <- w1 + learning_rate * (t(x1) %*% delta_hid)
    # FIX: biases were never trained in the original. Net input is
    # x %*% w - a, so d(net)/d(a) = -1 and the gradient step subtracts.
    a2 <- a2 - learning_rate * delta_out
    a1 <- a1 - learning_rate * delta_hid
  }
}

# Target vectors for the three classes.
y1 <- matrix(c(0.9, 0.1, 0.1), 1, 3)
y2 <- matrix(c(0.1, 0.9, 0.1), 1, 3)
y3 <- matrix(c(0.1, 0.1, 0.9), 1, 3)

# Classify one 1x4 row: pick the class whose target vector is closest to
# the network output in L1 distance (sum of absolute differences).
classify <- function(x1) {
  x3 <- forward(x1)
  d <- c(sum(abs(y1 - x3)), sum(abs(y2 - x3)), sum(abs(y3 - x3)))
  c("setosa", "versicolor", "virginica")[which.min(d)]
}

# Count correct predictions over an already-normalised data frame.
count_right <- function(df) {
  right <- 0
  for (i in seq_len(nrow(df))) {
    x1 <- matrix(as.numeric(df[i, feature_cols]), 1, 4)
    if (classify(x1) == df$Species[i]) {
      right <- right + 1
    }
  }
  right
}

# Training-set fit.
SUM <- nrow(data_train)
right <- count_right(data_train)
print("擬合度爲:")
print(right / SUM)
print("sum")
print(SUM)
print("right")
print(right)

# Learned parameters.
print("w1")
print(w1)
print("w2")
print(w2)

# Normalise the test set with the TRAINING min/max (see FIX above),
# then measure held-out accuracy.
for (col in feature_cols) {
  data_test[[col]] <- (data_test[[col]] - train_min[col]) / (train_max[col] - train_min[col])
}
SUM <- nrow(data_test)
right <- count_right(data_test)
print("accuracy:")
print(right / SUM)
cat("right", right)
print("")
cat("SUM", SUM)
3. 帶包實現BP人工神經網絡完成iris測試
# Package-based BP network (nnet) on iris: min-max scale the features,
# take a stratified 3/5 training split, then report test-set accuracy.
rm(list = ls())
# install.packages("sampling")
library(nnet)
library(sampling)
# NOTE(review): the original called setwd() on an encoding-corrupted
# (mojibake) path, which errors on any machine where that directory does
# not exist. This script performs no file I/O, so the call is removed.

data("iris")

# Min-max scale every feature column to [0, 1].
for (col in names(iris)[1:4]) {
  rng <- range(iris[[col]])
  iris[[col]] <- (iris[[col]] - rng[1]) / (rng[2] - rng[1])
}

# Stratified sample: 3/5 of each Species, without replacement.
n <- round(3 / 5 * nrow(iris) / 3)
sub_train <- strata(iris, stratanames = ("Species"), size = rep(n, 3), method = "srswor")
head(sub_train)

colnames(iris) <- c("x1", "x2", "x3", "x4", "y")
data_train <- iris[sub_train$ID_unit, ]
data_test <- iris[-sub_train$ID_unit, ]
dim(data_train)
dim(data_test)

# 6 hidden units, light weight decay, at most 1000 iterations.
model1 <- nnet(y ~ ., data = data_train, size = 6, decay = 5e-5, maxit = 1000)

# Accuracy on the held-out rows.
pred <- predict(model1, data_test[, 1:4], type = "class")
P <- sum(pred == data_test$y) / nrow(data_test)
cat("accuracy", P * 100, "%\n")
table(data_test$y, pred)