This series of experiments uses the PyTorch deep learning framework. The environment is set up as follows:
```bash
conda create -n DL python=3.7
conda activate DL
pip install torch==1.8.1+cu102 torchvision==0.9.1+cu102 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
conda install matplotlib
conda install scikit-learn
```
Package | Version in this lab | Latest version |
---|---|---|
matplotlib | 3.5.3 | 3.8.0 |
numpy | 1.21.6 | 1.26.0 |
python | 3.7.16 | |
scikit-learn | 0.22.1 | 1.3.0 |
torch | 1.8.1+cu102 | 2.0.1 |
torchaudio | 0.8.1 | 2.0.2 |
torchvision | 0.9.1+cu102 | 0.15.2 |
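To confirm that the installed environment matches the table, a quick check along these lines can be run (a minimal sketch, not part of the original lab code; it relies on each package exposing a `__version__` attribute, which all of the packages listed here do):

```python
import torch, torchvision, torchaudio, matplotlib, numpy, sklearn

# Print the installed version of every package used in this lab
for name, mod in [('torch', torch), ('torchvision', torchvision),
                  ('torchaudio', torchaudio), ('matplotlib', matplotlib),
                  ('numpy', numpy), ('scikit-learn', sklearn)]:
    print(f'{name}: {mod.__version__}')
print('CUDA available:', torch.cuda.is_available())
```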
```python
def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size

    def normal(inputs, hiddens):
        # torch.Tensor.to is not in-place: return the tensor moved to the device
        param = torch.rand((inputs, hiddens))
        return param.to(device)

    def three():
        return (normal(num_inputs, num_hiddens),
                normal(num_hiddens, num_hiddens),
                torch.zeros(num_hiddens, device=device))

    W_xz, W_hz, b_z = three()  # Update gate parameters
    W_xr, W_hr, b_r = three()  # Reset gate parameters
    W_xh, W_hh, b_h = three()  # Candidate hidden state parameters
    # Output layer parameters
    W_hq = normal(num_hiddens, num_outputs)
    b_q = torch.zeros(num_outputs, device=device)
    # Attach gradients
    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params
```
The get_params function initializes the model parameters. It takes three arguments: vocab_size, the size of the vocabulary; num_hiddens, the number of hidden units; and device, the device the model runs on (e.g. CPU or GPU).

It first sets num_inputs and num_outputs, both equal to vocab_size. The inner function normal generates a random parameter matrix of shape (inputs, hiddens) drawn from a uniform distribution and moves it to the given device. The inner function three returns a tuple of three such parameters: input-to-hidden weights, hidden-to-hidden weights, and a bias. three is called to initialize the update gate parameters W_xz, W_hz, b_z, the reset gate parameters W_xr, W_hr, b_r, and the candidate hidden state parameters W_xh, W_hh, b_h; the output layer weight W_hq is initialized with normal and the bias b_q with zeros. Finally, all parameters are collected into the list params, their requires_grad attribute is set to True so that gradients are computed for them, and params is returned.
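As a quick sanity check of the resulting shapes (illustrative only, not part of the original lab code; it assumes the hyperparameters used later in this lab):

```python
import torch

params = get_params(vocab_size=28, num_hiddens=256, device=torch.device('cpu'))
print(len(params))       # 11 parameter tensors
print(params[0].shape)   # W_xz: torch.Size([28, 256])
print(params[-2].shape)  # W_hq: torch.Size([256, 28])
print(params[-1].shape)  # b_q:  torch.Size([28])
```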
```python
def init_gru_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), )
```
The init_gru_state function initializes the hidden state that serves as input at time step t=0. It takes three arguments: batch_size, the batch size; num_hiddens, the number of hidden units; and device, the device the model runs on. It uses torch.zeros to create an all-zero tensor of shape (batch_size, num_hiddens) and returns it inside a one-element tuple as the initial hidden state.

```python
def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    # The @ symbol is the matrix multiplication operator
    for X in inputs:
        Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
        R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
        H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        Y = H @ W_hq + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)
```
The gru function is the core of the gated recurrent unit implementation. It takes three arguments: inputs, the input sequence; state, the hidden state; and params, the model parameters.

It first unpacks the update gate, reset gate, candidate hidden state, and output layer parameters from params, then iterates over inputs. For each time step input X and the current hidden state H, it computes the update gate Z, the reset gate R, and the candidate hidden state H_tilda, and then updates the hidden state H. From H it computes the output Y and appends it to the output list outputs. Finally, torch.cat concatenates the list into a tensor of shape (seq_length * batch_size, num_outputs), the model's output over the whole sequence, which is returned together with the final hidden state (H,).
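The loop body corresponds line by line to the standard GRU update equations, with $\odot$ denoting elementwise multiplication:

$$
\begin{aligned}
Z_t &= \sigma(X_t W_{xz} + H_{t-1} W_{hz} + b_z), \\
R_t &= \sigma(X_t W_{xr} + H_{t-1} W_{hr} + b_r), \\
\tilde{H}_t &= \tanh\!\left(X_t W_{xh} + (R_t \odot H_{t-1}) W_{hh} + b_h\right), \\
H_t &= Z_t \odot H_{t-1} + (1 - Z_t) \odot \tilde{H}_t.
\end{aligned}
$$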
```python
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
vocab_size, num_hiddens, num_epochs, lr = 28, 256, 200, 1
device = try_gpu()
```
The d2l.load_data_time_machine function loads the training data, and several training hyperparameters are set.

```python
model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params,
                            init_gru_state, gru)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
```
This builds the from-scratch GRU model model; the d2l.train_ch8 function then trains it.

```python
gru_layer = nn.GRU(vocab_size, num_hiddens)
model_gru = RNNModel(gru_layer, vocab_size)
train(model_gru, train_iter, vocab, lr, num_epochs, device)
```
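As a quick illustration of the shapes nn.GRU works with (a minimal sketch, not part of the lab code; the sizes match the hyperparameters used here):

```python
import torch
from torch import nn

gru = nn.GRU(input_size=28, hidden_size=256)  # vocab_size=28, num_hiddens=256
X = torch.randn(35, 32, 28)                   # (num_steps, batch_size, vocab_size)
H0 = torch.zeros(1, 32, 256)                  # (num_layers, batch_size, num_hiddens)
Y, Hn = gru(X, H0)
print(Y.shape)   # torch.Size([35, 32, 256])
print(Hn.shape)  # torch.Size([1, 32, 256])
```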
This builds model_gru on top of PyTorch's built-in GRU layer and trains it. Note: this part of the experiment uses the GRU class from the PyTorch library instead of the hand-written gru function. The complete program is listed below.
```python
# Import the necessary libraries
import torch
from torch import nn
import torch.nn.functional as F
from d2l import torch as d2l
import math

class RNNModel(nn.Module):
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        self.num_directions = 1
        self.linear = nn.Linear(self.num_hiddens, self.vocab_size)

    def forward(self, inputs, state):
        X = F.one_hot(inputs.T.long(), self.vocab_size)
        X = X.to(torch.float32)
        Y, state = self.rnn(X, state)
        # The fully connected layer first reshapes Y to
        # (num_steps * batch_size, num_hiddens);
        # its output has shape (num_steps * batch_size, vocab_size).
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    # At the first time step a hidden state must be initialized,
    # which this function provides
    def begin_state(self, device, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            # nn.GRU uses a tensor as the hidden state
            return torch.zeros((self.num_directions * self.rnn.num_layers,
                                batch_size, self.num_hiddens),
                               device=device)
        else:
            # nn.LSTM uses a tuple as the hidden state
            return (torch.zeros((
                self.num_directions * self.rnn.num_layers,
                batch_size, self.num_hiddens), device=device),
                    torch.zeros((
                        self.num_directions * self.rnn.num_layers,
                        batch_size, self.num_hiddens), device=device))
```
```python
def train(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    for epoch in range(num_epochs):
        ppl, speed = train_epoch(
            net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, [ppl])
    print('Train Done!')
    torch.save(net.state_dict(), 'chapter6.pth')
    print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')
```
```python
def train_epoch(net, train_iter, loss, updater, device, use_random_iter):
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # sum of training loss, number of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state on the first iteration or when using random sampling
            state = net.begin_state(batch_size=X.shape[0], device=device)
        if isinstance(net, nn.Module) and not isinstance(state, tuple):
            # state is a tensor for nn.GRU
            state.detach_()
        else:
            # state is a tuple of tensors for nn.LSTM and for our
            # from-scratch implementation
            for s in state:
                s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y.long()).mean()
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            # batch_size=1 because the mean has already been taken
            updater(batch_size=1)
        metric.add(l * d2l.size(y), d2l.size(y))
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
```
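train_epoch returns the perplexity, i.e. the exponentiated mean cross-entropy per token, together with the throughput in tokens per second:

$$
\text{ppl} = \exp\!\left(\frac{1}{N}\sum_{n=1}^{N} -\log P(x_n \mid x_1, \ldots, x_{n-1})\right)
$$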
```python
def predict(prefix, num_preds, net, vocab, device):
    state = net.begin_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]
    # Wrap the most recent token as a (1, 1) tensor for the next forward pass
    get_input = lambda: torch.reshape(torch.tensor(
        [outputs[-1]], device=device), (1, 1))
    for y in prefix[1:]:  # Warm-up period
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):  # Predict num_preds steps
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
```
```python
def grad_clipping(net, theta):
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    # Global L2 norm over all parameter gradients
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm
```
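grad_clipping rescales all gradients jointly so that their global L2 norm never exceeds the threshold $\theta$:

$$
\mathbf{g} \leftarrow \min\!\left(1, \frac{\theta}{\|\mathbf{g}\|}\right)\mathbf{g}
$$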
```python
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu()."""
    # GPU selection is commented out; this experiment runs on the CPU.
    # if torch.cuda.device_count() >= i + 1:
    #     return torch.device(f'cuda:{i}')
    return torch.device('cpu')
```
```python
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
vocab_size, num_hiddens, num_epochs, lr = 28, 256, 200, 1
device = try_gpu()
gru_layer = nn.GRU(vocab_size, num_hiddens)
model_gru = RNNModel(gru_layer, vocab_size)
train(model_gru, train_iter, vocab, lr, num_epochs, device)
print(predict('time ', 10, model_gru, vocab, device))
```