This tutorial shows how to go from understanding tensors to training a simple neural network with PyTorch, and is a very basic introductory PyTorch resource. PyTorch is built on Python and the Torch library, and provides a NumPy-like abstraction for representing tensors (i.e., multidimensional arrays); it can also use GPUs to improve performance. The code in this tutorial is not complete; see the original Jupyter Notebook for details.
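As a minimal sketch of that NumPy-like API (assuming only that PyTorch is installed; the values are illustrative):

import torch

a = torch.ones(2, 3)           # a 2x3 tensor of ones
b = a * 2 + 1                  # element-wise arithmetic, just as in NumPy
print(b.sum())                 # reduces over all elements

if torch.cuda.is_available():  # the same tensor can be moved to a GPU
    b = b.cuda()               # subsequent operations on b run on the GPU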
import torch

# Create a 2-D PyTorch tensor (i.e., a matrix)
pytorch_tensor = torch.Tensor(10, 20)
print("type: ", type(pytorch_tensor), " and size: ", pytorch_tensor.shape)
# Convert the PyTorch tensor to a numpy array:
numpy_tensor = pytorch_tensor.numpy()
print("type: ", type(numpy_tensor), " and size: ", numpy_tensor.shape)

# Convert the numpy array back to a PyTorch tensor:
print("type: ", type(torch.Tensor(numpy_tensor)), " and size: ", torch.Tensor(numpy_tensor).shape)
import numpy as np

t = torch.rand(2, 4, 3, 5)
a = np.random.rand(2, 4, 3, 5)
t = torch.rand(2, 4, 3, 5)
a = t.numpy()
pytorch_slice = t[0, 1:3, :, 4]
numpy_slice = a[0, 1:3, :, 4]
print('Tensor[0, 1:3, :, 4]:\n', pytorch_slice)
print('NdArray[0, 1:3, :, 4]:\n', numpy_slice)
---------------------------------------------------------------------------
Tensor[0, 1:3, :, 4]:

 0.2032  0.1594  0.3114
 0.9073  0.6497  0.2826
[torch.FloatTensor of size 2x3]

NdArray[0, 1:3, :, 4]:
[[ 0.20322084  0.15935552  0.31143939]
 [ 0.90726137  0.64966112  0.28259504]]
t = t - 0.5
a = t.numpy()
pytorch_masked = t[t > 0]
numpy_masked = a[a > 0]
pytorch_reshape = t.view([6, 5, 4])
numpy_reshape = a.reshape([6, 5, 4])
from torch.autograd import Variable
import torch.nn.functional as F

x = Variable(torch.randn(4, 1), requires_grad=False)
y = Variable(torch.randn(3, 1), requires_grad=False)
w1 = Variable(torch.randn(5, 4), requires_grad=True)
w2 = Variable(torch.randn(3, 5), requires_grad=True)
def model_forward(x):
    return F.sigmoid(w2 @ F.sigmoid(w1 @ x))

print(w1)
print(w1.data.shape)
print(w1.grad)  # Initially None: no backward pass has run yet
---------------------------------------------------------------------------
Variable containing:
 1.6068 -1.3304 -0.6717 -0.6097
-0.3414 -0.5062 -0.2533  1.0260
-0.0341 -1.2144 -1.5983 -0.1392
-0.5473  0.0084  0.4054  0.0970
 0.3596  0.5987 -0.0324  0.6116
[torch.FloatTensor of size 5x4]

torch.Size([5, 4])
None
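As a quick check (a minimal sketch reusing x, w1, w2 and model_forward from above; the .sum() loss is just an arbitrary scalar for illustration), a single backward pass is what fills in w1.grad:

loss = model_forward(x).sum()  # any scalar works for this check
loss.backward()                # autograd populates .grad on w1 and w2
print(w1.grad)                 # now a 5x4 gradient instead of None
w1.grad.data.zero_()           # reset so the training loop below starts clean
w2.grad.data.zero_()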
import torch.nn as nn

criterion = nn.MSELoss()
import torch.optim as optim

optimizer = optim.SGD([w1, w2], lr=0.001)
for epoch in range(10):
    loss = criterion(model_forward(x), y)
    optimizer.zero_grad()  # Zero out the previous gradients
    loss.backward()        # Compute the new gradients
    optimizer.step()       # Apply the gradients

print(w1)
---------------------------------------------------------------------------
Variable containing:
 1.6067 -1.3303 -0.6717 -0.6095
-0.3414 -0.5062 -0.2533  1.0259
-0.0340 -1.2145 -1.5983 -0.1396
-0.5476  0.0085  0.4055  0.0976
 0.3597  0.5986 -0.0324  0.6113
[torch.FloatTensor of size 5x4]
cuda_gpu = torch.cuda.is_available()
if cuda_gpu:
    print("Great, you have a GPU!")
else:
    print("Life is short - consider a GPU!")
if cuda_gpu:
    x = x.cuda()
    print(type(x.data))
    x = x.cpu()
    print(type(x.data))
---------------------------------------------------------------------------
<class 'torch.cuda.FloatTensor'>
<class 'torch.FloatTensor'>
def train(model, epoch, criterion, optimizer, data_loader):
    model.train()
    for batch_idx, (data, target) in enumerate(data_loader):
        if cuda_gpu:
            data, target = data.cuda(), target.cuda()
            model.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        optimizer.zero_grad()
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 400 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(data_loader.dataset),
                100. * (batch_idx + 1) / len(data_loader), loss.data[0]))

def test(model, epoch, criterion, data_loader):
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in data_loader:
        if cuda_gpu:
            data, target = data.cuda(), target.cuda()
            model.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        test_loss += criterion(output, target).data[0]
        pred = output.data.max(1)[1]  # Get the index of the max log-probability
        correct += pred.eq(target.data).cpu().sum()
    test_loss /= len(data_loader)  # The loss function already averages over batch size
    acc = correct / len(data_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(data_loader.dataset), 100. * acc))
    return (acc, test_loss)
from sklearn.datasets import make_regression
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt

sns.set()

x_train, y_train, W_target = make_regression(n_samples=100, n_features=1, noise=10, coef=True)
df = pd.DataFrame(data={'X': x_train.ravel(), 'Y': y_train.ravel()})
sns.lmplot(x='X', y='Y', data=df, fit_reg=True)
plt.show()
x_torch = torch.FloatTensor(x_train)
y_torch = torch.FloatTensor(y_train)
y_torch = y_torch.view(y_torch.size()[0], 1)
class LinearRegression(torch.nn.Module):
    def __init__(self, input_size, output_size):
        super(LinearRegression, self).__init__()
        self.linear = torch.nn.Linear(input_size, output_size)

    def forward(self, x):
        return self.linear(x)

model = LinearRegression(1, 1)
We also need to use an optimizer (SGD) and run backpropagation, similar to the previous example. Essentially, we repeat the steps of the train() function defined above. We cannot use that function directly, because we implemented it for classification rather than regression, using cross-entropy loss and the index of the maximum element as the model's prediction. For linear regression, we use the output of the linear layer itself as the prediction.
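To make that difference concrete, here is a minimal sketch (the score tensors are hypothetical stand-ins for a model's raw outputs):

# Classification (as in train()/test() above): the prediction is an argmax
class_scores = torch.randn(4, 10)     # hypothetical batch of 4, 10 classes
pred_labels = class_scores.max(1)[1]  # index of the max score per row

# Regression (as below): the linear layer's output is the prediction itself
reg_output = torch.randn(4, 1)        # hypothetical batch of 4 predictions
pred_values = reg_output              # no argmax; use the raw values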
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(50):
    data, target = Variable(x_torch), Variable(y_torch)
    output = model(data)
    optimizer.zero_grad()
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()

predicted = model(Variable(x_torch)).data.numpy()
plt.plot(x_train, y_train, 'o', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()
from torchvision import datasets, transforms

batch_num_size = 64

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=True, download=True, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_num_size, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_num_size, shuffle=True)
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
model = LeNet()
if cuda_gpu:
    model.cuda()

print('MNIST_net model:\n')
print(model)
---------------------------------------------------------------------------
MNIST_net model:

LeNet(
  (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1))
  (conv2_drop): Dropout2d(p=0.5)
  (fc1): Linear(in_features=320, out_features=50, bias=True)
  (fc2): Linear(in_features=50, out_features=10, bias=True)
)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9)
import os

epochs = 5
if os.path.isfile('pretrained/MNIST_net.t7'):
    print('Loading model')
    model.load_state_dict(torch.load('pretrained/MNIST_net.t7', map_location=lambda storage, loc: storage))
    acc, loss = test(model, 1, criterion, test_loader)
else:
    print('Training model')
    for epoch in range(1, epochs + 1):
        train(model, epoch, criterion, optimizer, train_loader)
    acc, loss = test(model, 1, criterion, test_loader)
    torch.save(model.state_dict(), 'pretrained/MNIST_net.t7')
---------------------------------------------------------------------------
Loading model

Test set: Average loss: 0.0471, Accuracy: 9859/10000 (99%)
print('Internal models:')
for idx, m in enumerate(model.named_modules()):
    print(idx, '->', m)
    print('---------------------------------------------------------------------------')

# Output:
Internal models:
0 -> ('', LeNet(
  (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1))
  (conv2_drop): Dropout2d(p=0.5)
  (fc1): Linear(in_features=320, out_features=50, bias=True)
  (fc2): Linear(in_features=50, out_features=10, bias=True)
))
---------------------------------------------------------------------------
1 -> ('conv1', Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1)))
---------------------------------------------------------------------------
2 -> ('conv2', Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1)))
---------------------------------------------------------------------------
3 -> ('conv2_drop', Dropout2d(p=0.5))
---------------------------------------------------------------------------
4 -> ('fc1', Linear(in_features=320, out_features=50, bias=True))
---------------------------------------------------------------------------
5 -> ('fc2', Linear(in_features=50, out_features=10, bias=True))
---------------------------------------------------------------------------
print(type(t.cpu().data))
if torch.cuda.is_available():
    print("Cuda is available")
    print(type(t.cuda().data))
else:
    print("Cuda is not available")
---------------------------------------------------------------------------
<class 'torch.FloatTensor'>
Cuda is available
<class 'torch.cuda.FloatTensor'>
if torch.cuda.is_available():
    try:
        print(t.data.numpy())
    except RuntimeError as e:
        # You can't convert a GPU tensor to a numpy ndarray; you have to copy
        # the tensor to the CPU first and then get the numpy array
        pass

print(type(t.cpu().data.numpy()))
print(t.cpu().data.numpy().shape)
print(t.cpu().data.numpy())
data = model.conv1.weight.cpu().data.numpy()
print(data.shape)
print(data[:, 0].shape)

kernel_num = data.shape[0]
fig, axes = plt.subplots(ncols=kernel_num, figsize=(2 * kernel_num, 2))

for col in range(kernel_num):
    axes[col].imshow(data[col, 0, :, :], cmap=plt.cm.gray)
plt.show()