Problem description
Despite the many existing answers on this topic, in the example below (excerpted from the variational recurrent network gist at https://gist.github.com/lirnli/c16ef186c75588e705d9864fb816a13c) I cannot see which input and output dimensions trigger the error. Changing the dimension in torch.cat and suppressing the squeeze() call did not help; the error persists:
<ipython-input-51-cdc928891ad7> in generate(self, hidden, temperature)
     56         x_sample = x = x_out.div(temperature).exp().multinomial(1).squeeze()
     57         x = self.phi_x(x)
---> 58         tc = torch.cat([x, z], dim=1)
     59
     60         hidden_next = self.rnn(tc, hidden)

IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
So how should the dimensions of x and z be shaped so that tc = torch.cat([x, z], dim=1) succeeds?
For reference, the full code follows:
import torch
from torch import nn, optim
from torch.autograd import Variable

class VRNNCell(nn.Module):
    def __init__(self):
        super(VRNNCell, self).__init__()
        self.phi_x = nn.Sequential(nn.Embedding(128, 64), nn.Linear(64, 64), nn.ELU())
        self.encoder = nn.Linear(128, 64*2)  # output hyperparameters
        self.phi_z = nn.Sequential(nn.Linear(64, 64), nn.ELU())
        self.decoder = nn.Linear(128, 128)   # logits
        self.prior = nn.Linear(64, 64*2)     # output hyperparameters
        self.rnn = nn.GRUCell(128, 64)

    def forward(self, x, hidden):
        x = self.phi_x(x)
        # 1. h => z
        z_prior = self.prior(hidden)
        # 2. x + h => z
        z_infer = self.encoder(torch.cat([x, hidden], dim=1))
        # sampling
        z = Variable(torch.randn(x.size(0), 64))*z_infer[:, 64:].exp()+z_infer[:, :64]
        z = self.phi_z(z)
        # 3. h + z => x
        x_out = self.decoder(torch.cat([hidden, z], dim=1))
        # 4. x + z => h
        hidden_next = self.rnn(torch.cat([x, z], dim=1), hidden)
        return x_out, hidden_next, z_prior, z_infer

    def calculate_loss(self, x, hidden):
        x_out, hidden_next, z_prior, z_infer = self.forward(x, hidden)
        # 1. logistic regression loss
        loss1 = nn.functional.cross_entropy(x_out, x)
        # 2. KL divergence between multivariate Gaussians
        mu_infer, log_sigma_infer = z_infer[:, :64], z_infer[:, 64:]
        mu_prior, log_sigma_prior = z_prior[:, :64], z_prior[:, 64:]
        loss2 = (2*(log_sigma_infer-log_sigma_prior)).exp() \
                + ((mu_infer-mu_prior)/log_sigma_prior.exp())**2 \
                - 2*(log_sigma_infer-log_sigma_prior) - 1
        loss2 = 0.5*loss2.sum(dim=1).mean()
        return loss1, loss2, hidden_next

    def generate(self, hidden=None, temperature=None):
        if hidden is None:
            hidden = Variable(torch.zeros(1, 64))
        if temperature is None:
            temperature = 0.8
        # 1. h => z
        z_prior = self.prior(hidden)
        # sampling
        z = Variable(torch.randn(z_prior.size(0), 64))*z_prior[:, 64:].exp()+z_prior[:, :64]
        z = self.phi_z(z)
        # 2. h + z => x
        x_out = self.decoder(torch.cat([hidden, z], dim=1))
        # sampling
        x_sample = x = x_out.div(temperature).exp().multinomial(1).squeeze()
        x = self.phi_x(x)
        # 3. x + z => h
        # hidden_next = self.rnn(torch.cat([x, z], dim=1), hidden)
        tc = torch.cat([x, z], dim=1)
        hidden_next = self.rnn(tc, hidden)
        return x_sample, hidden_next

    def generate_text(self, temperature=None, n=100):
        res = []
        hidden = None
        for _ in range(n):
            x_sample, hidden = self.generate(hidden, temperature)
            res.append(chr(x_sample.data[0]))
        return "".join(res)

# Test
net = VRNNCell()
x = Variable(torch.LongTensor([12, 13, 14]))
hidden = Variable(torch.rand(3, 64))
output, hidden_next, z_infer, z_prior = net(x, hidden)
loss1, loss2, _ = net.calculate_loss(x, hidden)
hidden = Variable(torch.zeros(1, 64))
net.generate_text()
Solution
The error

IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)

means that you're trying to access an index that doesn't exist in the tensor. For instance, the following code would cause the same IndexError you're experiencing:
# sample input tensors
In [210]: x = torch.arange(4)
In [211]: z = torch.arange(6)

# trying to concatenate along the second dimension,
# but the tensors have only one dimension (i.e., `0`)
In [212]: torch.cat([x, z], dim=1)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
So, one way to overcome this is to promote the tensors to a higher dimension before concatenation, if that is what you need:
# promoting tensors to 2D before concatenation
In [216]: torch.cat([x[None, :], z[None, :]], dim=1)
Out[216]: tensor([[0, 1, 2, 3, 0, 1, 2, 3, 4, 5]])
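For the record, tensor[None, :] is just one way to add a leading dimension; unsqueeze(0) does the same thing and may read more explicitly. A small illustration, reusing the same x and z as above:

# unsqueeze(0) inserts a new dimension at position 0,
# turning a shape-(N,) tensor into shape (1, N)
In [217]: torch.cat([x.unsqueeze(0), z.unsqueeze(0)], dim=1)
Out[217]: tensor([[0, 1, 2, 3, 0, 1, 2, 3, 4, 5]])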
Thus, in your case, you have to analyze and understand what shape x needs so that it can be concatenated with z along dimension 1, and so that tc can then be passed, together with hidden, as input to self.rnn(). As far as I can tell, x[None, :] and z[None, :] should work.
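To see why the call fails inside generate() in the first place, it helps to trace the shapes step by step. The sketch below is standalone, using the same layer sizes as the question's code, and reproduces the mismatch:

import torch
from torch import nn

# hidden starts out as shape (1, 64), so the decoder output is (1, 128)
x_out = torch.rand(1, 128)

# multinomial(1) samples one index per row => shape (1, 1);
# squeeze() then removes *both* singleton dimensions => 0-dim scalar
x = x_out.multinomial(1).squeeze()
print(x.shape)          # torch.Size([])

# embedding a 0-dim index gives a 1-D output of shape (64,)
phi_x = nn.Sequential(nn.Embedding(128, 64), nn.Linear(64, 64), nn.ELU())
x = phi_x(x)
print(x.shape)          # torch.Size([64])

# z, produced from hidden, is 2-D: shape (1, 64)
z = torch.rand(1, 64)

# torch.cat([x, z], dim=1)  # IndexError: 1-D x has no dimension 1

# promoting x to 2-D makes the shapes compatible
tc = torch.cat([x[None, :], z], dim=1)
print(tc.shape)         # torch.Size([1, 128])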
Debugging for successful training
The code you posted was written for PyTorch v0.4.1. A lot has changed in the PyTorch Python API since then, but the code was never updated.
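Two of the relevant changes, stated from memory (worth verifying against the release notes for your installed version): Variable has been a deprecated no-op wrapper since v0.4, as plain tensors now carry autograd state, and a 0-dim tensor can no longer be indexed with [0]; its value has to be read with .item(). For example:

import torch

loss = torch.tensor(3.14)  # losses are 0-dim (scalar) tensors since v0.4
# loss.data[0]             # IndexError on recent versions: can't index a 0-dim tensor
print(loss.item())         # 3.14 -- the supported way to get a Python scalar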
Below are the changes needed to make the code run and train successfully. Copy the following functions and paste them at the appropriate places in your code.
def generate(self, hidden=None, temperature=None):
    if hidden is None:
        hidden = Variable(torch.zeros(1, 64))
    if temperature is None:
        temperature = 0.8
    # 1. h => z
    z_prior = self.prior(hidden)
    # sampling
    z = Variable(torch.randn(z_prior.size(0), 64))*z_prior[:, 64:].exp()+z_prior[:, :64]
    z = self.phi_z(z)
    # 2. h + z => x
    x_out = self.decoder(torch.cat([hidden, z], dim=1))
    # sampling
    x_sample = x = x_out.div(temperature).exp().multinomial(1).squeeze()
    x = self.phi_x(x)
    # 3. x + z => h
    x = x[None, ...]                    # changed here: promote x from 1D (64,) to 2D (1, 64)
    xz = torch.cat([x, z], dim=1)       # changed here
    hidden_next = self.rnn(xz, hidden)  # changed here
    return x_sample, hidden_next

def generate_text(self, temperature=None, n=100):
    res = []
    hidden = None
    for _ in range(n):
        x_sample, hidden = self.generate(hidden, temperature)
        res.append(chr(x_sample.data))  # changed here: x_sample is a 0-dim tensor now
    return "".join(res)
for epoch in range(max_epoch):
    batch = next(g)
    loss_seq = 0
    loss1_seq, loss2_seq = 0, 0
    optimizer.zero_grad()
    for x in batch:
        loss1, loss2, hidden = net.calculate_loss(Variable(x), hidden)
        loss1_seq += loss1.data  # changed here
        loss2_seq += loss2.data  # changed here
        loss_seq = loss_seq + loss1 + loss2
    loss_seq.backward()
    optimizer.step()
    hidden.detach_()
    if epoch % 100 == 0:
        print('>> epoch {}, loss {:12.4f}, decoder loss {:12.4f}, latent loss {:12.4f}'.format(
            epoch, loss_seq.data, loss1_seq, loss2_seq))  # changed here
        print(net.generate_text())
        print()
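Note that the loop above relies on net, optimizer, hidden, max_epoch, and the batch generator g being defined elsewhere in the gist. If you run it standalone, a rough sketch of that setup could look as follows (the generator and the values here are placeholders for illustration, not the gist's actual data pipeline):

# hypothetical setup -- adapt to the real data pipeline from the gist
net = VRNNCell()
optimizer = optim.Adam(net.parameters(), lr=1e-3)
hidden = Variable(torch.zeros(3, 64))  # one hidden state per sequence in the batch
max_epoch = 1000

def batch_generator():
    # yields lists of LongTensors of character codes; random placeholder data
    while True:
        yield [torch.randint(0, 128, (3,)) for _ in range(32)]

g = batch_generator()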
Note: After these changes, my training loop ran without any errors on PyTorch v1.7.1. Look for the comments marked # changed here to understand the changes.