多項式擬合
%matplotlib inline
import gluonbook as gb
from mxnet import nd,autograd, gluon
from mxnet.gluon import data as gdata,loss as gloss,nn
生成數據集 $$y = 1.2x - 3.4x^2 + 5.6x^3 + 5 + \epsilon$$
# Build a synthetic regression dataset:
#   y = 1.2x - 3.4x^2 + 5.6x^3 + 5 + Gaussian noise (std 0.1)
n_train, n_test = 100, 100
true_w = [1.2, -3.4, 5.6]
true_b = 5
# One scalar feature per sample, drawn from a standard normal.
features = nd.random.normal(shape=(n_train + n_test, 1))
features.shape
# Expand to [x, x^2, x^3] so a linear model can fit the cubic target.
poly_features = nd.concat(features, nd.power(features, 2), nd.power(features, 3))
poly_features.shape
# Noise-free labels: weighted sum of the polynomial terms plus the bias.
labels = true_b + sum(w * poly_features[:, i] for i, w in enumerate(true_w))
labels += nd.random.normal(scale=0.1, shape=labels.shape)
features[:2], poly_features[:2], labels[:2]
定義、訓練、測試模型
平方損失函數
# Shared across every experiment below: squared (L2) loss and the
# number of training epochs.
loss = gloss.L2Loss()
num_epochs = 100
def fit_and_plot(train_features, test_features, train_labels, test_labels):
    """Train a single-output linear model on the given features.

    Records the train/test L2 loss after every epoch, plots both curves
    on a semilog scale, and prints the final losses and the learned
    weight/bias so over- and underfitting can be compared.
    """
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize()
    # Guard against training sets smaller than the default batch size
    # (the overfitting demo below trains on only 2 samples).
    batch_size = min(10, train_labels.shape[0])
    train_iter = gdata.DataLoader(
        gdata.ArrayDataset(train_features, train_labels),
        batch_size, shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.01})
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
        # Evaluate the loss over the full train/test sets once per epoch.
        train_ls.append(loss(net(train_features),
                             train_labels).mean().asscalar())
        test_ls.append(loss(net(test_features),
                            test_labels).mean().asscalar())
    print('final epoch: train loss', train_ls[-1], 'test loss', test_ls[-1])
    # Fix: the function is called fit_and_plot but never plotted the
    # recorded loss curves; plot them as in the rest of the book's code.
    gb.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
                range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('weight:', net[0].weight.data().asnumpy())
    print('bias:', net[0].bias.data().asnumpy())
# Normal fit: cubic features match the cubic data-generating process.
train_poly, test_poly = poly_features[:n_train, :], poly_features[n_train:, :]
train_y, test_y = labels[:n_train], labels[n_train:]
fit_and_plot(train_poly, test_poly, train_y, test_y)
poly_features.shape
欠擬合
features.shape
# Underfitting: raw linear features cannot capture the cubic target.
lin_train, lin_test = features[:n_train, :], features[n_train:, :]
fit_and_plot(lin_train, lin_test, labels[:n_train], labels[n_train:])
過擬合
# Overfitting: only two training samples for a three-feature model.
tiny_x, tiny_y = poly_features[0:2, :], labels[0:2]
fit_and_plot(tiny_x, poly_features[n_train:, :], tiny_y, labels[n_train:])