python - 将 pytorch 模型转换为 Coreml 时出错。层有 1 个输入,但预计至少有 2 个
问题描述
我的目标是将我的 Pytorch 模型转换为 Coreml。我对使用 pytorch 进行推理没有问题。但是,在我跟踪我的模型并尝试转换它之后
trace = torch.jit.trace(traceable_model, data)
mlmodel = ct.convert(
trace,
inputs=[ct.TensorType(name="Image", shape=data.shape)])
我收到以下错误Error compiling model: "Error reading protobuf spec. validator error: Layer 'cur_layer_input.1' of type 925 has 1 inputs but expects at least 2."
我的模型中有一个 ConvLSTM 层,其中包含 cur_layer_input 变量。以下是它的内部实现。
class ConvLSTM(nn.Module):
    """Stacked convolutional LSTM built from ConvLSTMCell layers.

    Each layer is a ConvLSTMCell; the hidden-state sequence of layer i is
    fed as the input sequence of layer i + 1.

    NOTE(review): indentation below was reconstructed from a paste that
    stripped all leading whitespace — confirm against the original file.
    """

    # NOTE(review): the question author truncated this method ("I cut out
    # some of the init") — the parameter list is left unclosed and the body
    # is only a fragment, so this snippet is not runnable as-is.
    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
        #I cut out some of the init
        for i in range(0, self.num_layers):
            # Layer 0 consumes the raw input channels; every deeper layer
            # consumes the previous layer's hidden channels.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),
                                          input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))
        # ModuleList registers each cell so its parameters are tracked.
        self.cell_list = nn.ModuleList(cell_list)

    def forward(self, input_tensor, hidden_state=None):
        """Run the input sequence through every ConvLSTM layer.

        Args:
            input_tensor: 5-D tensor, (t, b, c, h, w) when
                self.batch_first is False, else (b, t, c, h, w).
            hidden_state: must be None — passing a state raises
                NotImplementedError; initial states come from
                self._init_hidden instead.

        Returns:
            Tuple (layer_output_list, last_state_list). Both lists are
            trimmed to the final layer when self.return_all_layers is
            False.
        """
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor=input_tensor.permute(1, 0, 2, 3, 4)
        # Implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            # One (h, c) pair per layer, sized for the current batch.
            hidden_state = self._init_hidden(batch_size=input_tensor.size(0))
        layer_output_list = []
        last_state_list = []
        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]
            output_inner = []
            for t in range(seq_len):
                # One timestep through this layer's cell; (h, c) carries
                # over to the next timestep.
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                                 cur_state=[h, c])
                output_inner.append(h)
            # Stack per-timestep hidden states along the time axis (dim=1,
            # i.e. batch-first layout).
            layer_output = torch.stack(output_inner, dim=1)
            # This layer's output sequence feeds the next layer.
            cur_layer_input = layer_output
            # NOTE(review): applied unconditionally, unlike the batch_first
            # permute at the top — presumably converts the appended copy to
            # time-first for the caller; confirm this is intended.
            layer_output = layer_output.permute(1, 0, 2, 3, 4)
            layer_output_list.append(layer_output)
            last_state_list.append([h, c])
        if not self.return_all_layers:
            # Keep only the deepest layer's outputs and final state.
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]
        return layer_output_list, last_state_list
我不太明白错误信息中所说的"需要至少 2 个输入"的层对应代码里的哪个位置。
解决方案
通常,coremltools 转换器会忽略模型中它们无法理解的部分。这会导致转换表面上成功,但实际上遗漏了模型的某些部分。