203_PyTorch Tutorial: Activation Functions (activation function)

Activation functions serve as the basic nonlinear transformations in a deep learning model; together with the parameters and the network layers, they make up the neural network.
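
As a minimal sketch of that idea (the layer sizes below are made up purely for illustration), an activation function is typically inserted between the linear layers of a network:

import torch
import torch.nn as nn

# A tiny two-layer network: the ReLU activation between the linear
# layers is what makes the model nonlinear.
model = nn.Sequential(
    nn.Linear(1, 10),   # input -> hidden (sizes are illustrative)
    nn.ReLU(),          # activation function
    nn.Linear(10, 1),   # hidden -> output
)

out = model(torch.randn(4, 1))  # a batch of 4 one-dimensional inputs
print(out.shape)                # torch.Size([4, 1])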

Dependencies

  • torch
  • matplotlib
import torch
import torch.nn.functional as F
from torch.autograd import Variable  # kept for compatibility; since PyTorch 0.4 plain tensors support autograd directly
import matplotlib.pyplot as plt

First, create some dummy data for testing.

x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()   # numpy array for plotting
x
tensor([-5.0000, -4.9497, -4.8995, -4.8492, -4.7990, -4.7487, -4.6985, -4.6482,
        -4.5980, -4.5477, -4.4975, -4.4472, -4.3970, -4.3467, -4.2965, -4.2462,
        -4.1960, -4.1457, -4.0955, -4.0452, -3.9950, -3.9447, -3.8945, -3.8442,
        -3.7940, -3.7437, -3.6935, -3.6432, -3.5930, -3.5427, -3.4925, -3.4422,
        -3.3920, -3.3417, -3.2915, -3.2412, -3.1910, -3.1407, -3.0905, -3.0402,
        -2.9899, -2.9397, -2.8894, -2.8392, -2.7889, -2.7387, -2.6884, -2.6382,
        -2.5879, -2.5377, -2.4874, -2.4372, -2.3869, -2.3367, -2.2864, -2.2362,
        -2.1859, -2.1357, -2.0854, -2.0352, -1.9849, -1.9347, -1.8844, -1.8342,
        -1.7839, -1.7337, -1.6834, -1.6332, -1.5829, -1.5327, -1.4824, -1.4322,
        -1.3819, -1.3317, -1.2814, -1.2312, -1.1809, -1.1307, -1.0804, -1.0302,
        -0.9799, -0.9296, -0.8794, -0.8291, -0.7789, -0.7286, -0.6784, -0.6281,
        -0.5779, -0.5276, -0.4774, -0.4271, -0.3769, -0.3266, -0.2764, -0.2261,
        -0.1759, -0.1256, -0.0754, -0.0251,  0.0251,  0.0754,  0.1256,  0.1759,
         0.2261,  0.2764,  0.3266,  0.3769,  0.4271,  0.4774,  0.5276,  0.5779,
         0.6281,  0.6784,  0.7286,  0.7789,  0.8291,  0.8794,  0.9296,  0.9799,
         1.0302,  1.0804,  1.1307,  1.1809,  1.2312,  1.2814,  1.3317,  1.3819,
         1.4322,  1.4824,  1.5327,  1.5829,  1.6332,  1.6834,  1.7337,  1.7839,
         1.8342,  1.8844,  1.9347,  1.9849,  2.0352,  2.0854,  2.1357,  2.1859,
         2.2362,  2.2864,  2.3367,  2.3869,  2.4372,  2.4874,  2.5377,  2.5879,
         2.6382,  2.6884,  2.7387,  2.7889,  2.8392,  2.8894,  2.9397,  2.9899,
         3.0402,  3.0905,  3.1407,  3.1910,  3.2412,  3.2915,  3.3417,  3.3920,
         3.4422,  3.4925,  3.5427,  3.5930,  3.6432,  3.6935,  3.7437,  3.7940,
         3.8442,  3.8945,  3.9447,  3.9950,  4.0452,  4.0955,  4.1457,  4.1960,
         4.2462,  4.2965,  4.3467,  4.3970,  4.4472,  4.4975,  4.5477,  4.5980,
         4.6482,  4.6985,  4.7487,  4.7990,  4.8492,  4.8995,  4.9497,  5.0000])

Below are a few popular activation functions.

y_relu = F.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()

# y_softmax = F.softmax(x, dim=0)
# softmax is a special kind of activation function: it turns a vector into
# a probability distribution, so its outputs always sum to 1.
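
To illustrate that comment, here is a small sketch (not part of the original code; the input values are made up) showing that softmax produces a probability distribution whose entries sum to 1. Note that F.softmax expects an explicit dim argument on recent PyTorch versions.

scores = torch.tensor([1.0, 2.0, 3.0])
probs = F.softmax(scores, dim=0)   # dim=0: normalize over the only dimension
print(probs)        # tensor([0.0900, 0.2447, 0.6652])
print(probs.sum())  # tensor(1.0000) -- softmax output always sums to 1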

Plot the response curve of each activation function.

%matplotlib inline
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')

plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')

plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')

plt.show()

The resulting figure contains four subplots: the relu, sigmoid, tanh, and softplus response curves over the range [-5, 5].
