We will implement the entire method from scratch, including the data pipeline, the model, the loss function, and the minibatch stochastic gradient descent optimizer.
%matplotlib inline
import random
import torch
from d2l import torch as d2l
Construct an artificial dataset according to a linear model with additive noise. We use the linear model parameters $\mathbf{w} = [2, -3.4]^\top$, $b = 4.2$, and a noise term $\epsilon$ to generate the dataset and its labels:
$$\mathbf{y} = \mathbf{X} \mathbf{w} + b + \boldsymbol{\epsilon}$$
def synthetic_data(w, b, num_examples):
"""生成 y = Xw + b + 噪声。"""
X = torch.normal(0, 1, (num_examples, len(w)))
y = torch.matmul(X, w) + b
y += torch.normal(0, 0.01, y.shape)
return X, y.reshape((-1, 1))
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
Each row in features contains a two-dimensional data example, and each row in labels contains a one-dimensional label value (a scalar).
print('features:', features[0], '\nlabel:', labels[0])
features: tensor([-0.6612, -1.8215])
label: tensor([9.0842])
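A quick shape check confirms this layout; a minimal sanity-check sketch:
print(features.shape, labels.shape)  # torch.Size([1000, 2]) torch.Size([1000, 1])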
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(),
                labels.detach().numpy(), 1);
Define a data_iter function that takes the batch size, a feature matrix, and a label vector as input, and generates minibatches of size batch_size.
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    # The examples are read in random order
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        # The last minibatch may be smaller than batch_size
        batch_indices = torch.tensor(
            indices[i:min(i + batch_size, num_examples)])
        yield features[batch_indices], labels[batch_indices]
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
print(X, '\n', y)
break
tensor([[ 0.3161, -1.1807],
        [ 0.8956,  1.3530],
        [-0.9413, -0.2198],
        [ 0.2601, -1.4972],
        [ 0.2821,  0.0491],
        [ 1.2272,  0.3696],
        [-1.0256, -0.5918],
        [-1.9352,  0.6146],
        [-0.3074, -1.5586],
        [ 0.7992,  0.4547]])
 tensor([[ 8.8543],
        [ 1.3933],
        [ 3.0845],
        [ 9.7996],
        [ 4.5993],
        [ 5.3924],
        [ 4.1613],
        [-1.7471],
        [ 8.8908],
        [ 4.2466]])
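Because data_iter slices a single shuffled permutation into consecutive chunks, one full pass visits every example exactly once. A minimal check sketching this (the variable name total is illustrative, not from the original):
total = sum(len(X) for X, _ in data_iter(batch_size, features, labels))
print(total)  # 1000: each example lands in exactly one minibatch per epoch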
Initialize the model parameters
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
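Setting requires_grad=True asks autograd to populate each parameter's .grad buffer on the backward pass. A minimal illustration, separate from training (the model is written out with matmul since linreg is only defined below, and the gradients are zeroed again afterwards):
l = ((torch.matmul(features[:2], w) + b - labels[:2]) ** 2 / 2).sum()
l.backward()
print(w.grad.shape, b.grad.shape)  # torch.Size([2, 1]) torch.Size([1])
w.grad.zero_()  # reset so this demo does not affect training
b.grad.zero_()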
Define the model
def linreg(X, w, b):
"""线性回归模型。"""
return torch.matmul(X, w) + b
Define the loss function
def squared_loss(y_hat, y):
"""均方损失。"""
return (y_hat - y.reshape(y_hat.shape))**2 / 2
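For reference, this computes the per-example squared loss; the factor of $\frac{1}{2}$ just makes the derivative notationally cleaner:
$$l^{(i)}(\mathbf{w}, b) = \frac{1}{2}\left(\hat{y}^{(i)} - y^{(i)}\right)^2$$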
Define the optimization algorithm
def sgd(params, lr, batch_size):
"""小批量随机梯度下降。"""
with torch.no_grad():
for param in params:
param -= lr * param.grad / batch_size
param.grad.zero_()
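Written out, each call to sgd applies the standard minibatch stochastic gradient descent update with learning rate $\eta$ over a minibatch $\mathcal{B}$:
$$(\mathbf{w}, b) \leftarrow (\mathbf{w}, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \partial_{(\mathbf{w}, b)} l^{(i)}(\mathbf{w}, b)$$
Dividing by batch_size turns the summed gradient produced by l.sum().backward() in the training loop below into an average, so the step size does not depend on the batch size.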
The training loop
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # Minibatch loss on X and y
        # l has shape (batch_size, 1) rather than being a scalar; sum its
        # elements to get a scalar before computing gradients w.r.t. [w, b]
        l.sum().backward()
        sgd([w, b], lr, batch_size)  # Update parameters using their gradients
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
epoch 1, loss 0.033103
epoch 2, loss 0.000124
epoch 3, loss 0.000054
Evaluate how successful training was by comparing the true parameters with the parameters learned through training.
print(f'error in estimating w: {true_w - w.reshape(true_w.shape)}')
print(f'error in estimating b: {true_b - b}')
error in estimating w: tensor([ 0.0006, -0.0004], grad_fn=<SubBackward0>)
error in estimating b: tensor([0.0003], grad_fn=<RsubBackward1>)
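With training done, the learned parameters can be used for prediction. A minimal sketch comparing fitted outputs against the first few (noisy) labels:
with torch.no_grad():
    preds = net(features[:3], w, b)
print(preds, '\n', labels[:3])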