Concisely implementing the linear regression model by using a deep learning framework
Generating the dataset
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
true_w = torch.tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
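d2l.synthetic_data draws the features from a standard normal distribution and produces labels y = Xw + b plus a small amount of Gaussian noise. If the d2l package is unavailable, a minimal stand-in might look like the sketch below (the noise scale 0.01 is an assumption taken from the from-scratch section):
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise (stand-in for d2l.synthetic_data)."""
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)  # small Gaussian noise
    return X, y.reshape((-1, 1))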
Reading the data by calling existing APIs in the framework
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a PyTorch data iterator."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
next(iter(data_iter))
[tensor([[-0.3503, -1.4372],
         [-1.0654,  0.4921],
         [ 0.3208,  0.4166],
         [-2.5884, -0.9802],
         [-1.6644, -0.0677],
         [ 0.7457,  1.1798],
         [-1.4640,  1.7983],
         [ 0.0327, -1.6439],
         [-0.8879, -1.6287],
         [-0.8156, -1.5579]]),
 tensor([[ 8.3842],
         [ 0.3884],
         [ 3.4247],
         [ 2.3456],
         [ 1.0868],
         [ 1.6896],
         [-4.8205],
         [ 9.8460],
         [ 7.9508],
         [ 7.8745]])]
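For held-out data we would typically pass is_train=False so that the loader does not shuffle between epochs; a hypothetical example reusing the same arrays:
test_iter = load_array((features, labels), batch_size, is_train=False)  # no shuffling for evaluation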
Using the framework's predefined layers
from torch import nn
net = nn.Sequential(nn.Linear(2, 1))
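The single layer can be reached by index, as done for initialization below; its weight matrix has shape (output features, input features) = (1, 2) and its bias has shape (1,). For example:
print(net[0].weight.shape, net[0].bias.shape)  # torch.Size([1, 2]) torch.Size([1])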
Initializing the model parameters
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
tensor([0.])
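Equivalently, the parameters can be initialized with the functions in torch.nn.init; the following sketch reproduces the in-place initialization above:
nn.init.normal_(net[0].weight, mean=0, std=0.01)  # weights ~ N(0, 0.01)
nn.init.zeros_(net[0].bias)                       # bias set to 0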
The MSELoss class computes the mean squared error, also known as the squared $L_2$ norm
loss = nn.MSELoss()
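By default MSELoss uses reduction='mean', so on a minibatch it agrees with the averaged squared error computed by hand; a quick sanity check (illustrative only):
X, y = next(iter(data_iter))
y_hat = net(X)
assert torch.isclose(loss(y_hat, y), ((y_hat - y) ** 2).mean())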
Instantiating an SGD instance
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
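Each call to trainer.step() applies the plain minibatch SGD update param <- param - lr * param.grad to every parameter. A rough sketch of the equivalent manual update, shown only for illustration since the optimizer handles it for us:
with torch.no_grad():
    for param in net.parameters():
        if param.grad is not None:       # gradients exist only after a backward pass
            param -= 0.03 * param.grad   # the update trainer.step() performs with lr=0.03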
The training loop is very similar to what we did when implementing everything from scratch
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)
        trainer.zero_grad()
        l.backward()
        trainer.step()
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')
epoch 1, loss 0.000232
epoch 2, loss 0.000101
epoch 3, loss 0.000102
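The per-epoch loss above is evaluated on the full dataset; since no gradients are needed for that evaluation, it can optionally be wrapped in torch.no_grad() to avoid building a computation graph:
with torch.no_grad():
    l = loss(net(features), labels)
print(f'epoch {epoch + 1}, loss {l:f}')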
Comparing the true parameters used to generate the dataset with the model parameters learned from a finite amount of data
w = net[0].weight.data
print('error in estimating w:', true_w - w.reshape(true_w.shape))
b = net[0].bias.data
print('error in estimating b:', true_b - b)
error in estimating w: tensor([1.3828e-05, 9.5677e-04])
error in estimating b: tensor([0.0005])