python - 具有顺序的pytorch concat层
问题描述
所以我之前多次使用 nn.Sequential 但现在我遇到了一个奇怪的错误,其中一层会将 3 个输出发送到下一层而不是两个(如预期的那样)
我已将我的图层定义为:
class BidirectionalGRU(nn.Module):
def __init__(self, rnn_dim, hidden_size, dropout=0.0, batch_first=False):
super(BidirectionalGRU, self).__init__()
self.BiGRU = nn.GRU(
input_size=rnn_dim,
hidden_size=hidden_size,
num_layers=1,
batch_first=batch_first,
bidirectional=True,
)
def forward(self, x, hidden):
x, hidden = self.BiGRU(x)
return x, hidden
我在这里的代码中使用它:
class Listener(nn.Module):
def __init__(
self, input_feature_dim_listener, hidden_size_listener, num_layers_listener
):
super(Listener, self).__init__()
assert num_layers_listener >= 1, "Listener should have at least 1 layer"
self.hidden_size = hidden_size_listener
self.gru_1 = nn.Sequential(
BidirectionalGRU(
rnn_dim=input_feature_dim_listener,
hidden_size=hidden_size_listener,
batch_first=True,
),
BidirectionalGRU(
rnn_dim=hidden_size_listener * 2,
hidden_size=hidden_size_listener,
batch_first=True,
),
BidirectionalGRU(
rnn_dim=hidden_size_listener * 2,
hidden_size=hidden_size_listener,
batch_first=True,
),
BidirectionalGRU(
rnn_dim=hidden_size_listener * 2,
hidden_size=hidden_size_listener,
batch_first=True,
),
)
def initHidden(self):
return torch.zeros([2, 8, 512])
def forward(self, x):
x = x.squeeze().permute(0, 2, 1)
fake_hidden = self.initHidden()
output, hidden = self.gru_1(x, fake_hidden)
#output, hidden = self.gru_2(output, hidden)
#output, hidden = self.gru_3(output, hidden)
#output, hidden = self.gru_4(output, hidden)
return output, hidden
这不起作用并给我一个错误:
las(spectrograms, spectrograms, 0.5)
File "/Users/jaime/anaconda3/envs/torch/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "caca.py", line 116, in forward
listener_feature, hidden = self.listener(batch_data)
File "/Users/jaime/anaconda3/envs/torch/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "caca.py", line 84, in forward
output, hidden = self.gru_1(x, fake_hidden)
File "/Users/jaime/anaconda3/envs/torch/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given
但是如果我手动定义每一层而不是使用nn.Sequential
并传递输出,隐藏自己然后它可以工作:
class Listener(nn.Module):
def __init__(
self, input_feature_dim_listener, hidden_size_listener, num_layers_listener
):
super(Listener, self).__init__()
assert num_layers_listener >= 1, "Listener should have at least 1 layer"
self.hidden_size = hidden_size_listener
self.gru_1 = BidirectionalGRU(
rnn_dim=input_feature_dim_listener,
hidden_size=hidden_size_listener,
batch_first=True,
)
self.gru_2 = BidirectionalGRU(
rnn_dim=hidden_size_listener * 2,
hidden_size=hidden_size_listener,
batch_first=True,
)
self.gru_3 = BidirectionalGRU(
rnn_dim=hidden_size_listener * 2,
hidden_size=hidden_size_listener,
batch_first=True,
)
self.gru_4 = BidirectionalGRU(
rnn_dim=hidden_size_listener * 2,
hidden_size=hidden_size_listener,
batch_first=True,
)
def initHidden(self):
return torch.zeros([2, 8, 512])
def forward(self, x):
x = x.squeeze().permute(0, 2, 1)
fake_hidden = self.initHidden()
output, hidden = self.gru_1(x, fake_hidden)
output, hidden = self.gru_2(output, hidden)
output, hidden = self.gru_3(output, hidden)
output, hidden = self.gru_4(output, hidden)
return output, hidden
这完美地工作。我想让我的代码基于一个参数创建多个层,并nn.Sequential
在内部使用 for 循环将允许它。
解决方案
推荐阅读
- azure-devops - 在为 Dynamics 365 Field 服务导出解决方案时,在 Azure DevOps 中提供的解决方案名称是什么?
- kubeflow-pipelines - 如何在 Kubeflow Pipelines 中指定 InputPath 或 OutputPath 的本地路径
- java - JSP Input value To Java Method -> HttpServletRequest 给出 NULL 值
- javascript - 如何在代码和框中禁用对 SVG 导入的特殊处理?
- oracle - 如何在保持数据完整性的同时对乱序的行重新排序?
- python - GenericRelation 的 Django m2m_changed 信号,有可能吗?
- c - 已解决:使用 Do While 和 For 的例程会产生未知错误 GCC 核心转储(段错误)
- php - 什么时候通过php get"?"导入csv数据
- azure - 如何通过特定 URL 直接访问 Azure 应用服务实例?
- javascript - ReactCrop 以原始分辨率裁剪图像,而不是使用 CSS 更改