Why does a PyTorch model perform poorly after setting eval()?

Problem Description

I built a segmentation model in PyTorch that uses BatchNormalization layers. I found that when I set model.eval() at test time, the test results are all 0. If I do not set model.eval(), the model performs well.

I searched for related questions and learned that model.eval() fixes (freezes) the BN parameters, but I am still confused about how to solve this problem.

My batch size is 1, and here is my model:

import torch
import torch.nn as nn


class Encode_Block(nn.Module):
    def __init__(self, in_feat, out_feat):
        super(Encode_Block, self).__init__()

        self.conv1 = Res_Block(in_feat, out_feat)
        self.conv2 = Res_Block_identity(out_feat, out_feat)

    def forward(self, inputs):
        outputs = self.conv1(inputs)
        outputs = self.conv2(outputs)
        return outputs


class Decode_Block(nn.Module):
    def __init__(self, in_feat, out_feat):
        super(Decode_Block, self).__init__()

        self.conv1 = Res_Block(in_feat, out_feat)
        self.conv2 = Res_Block_identity(out_feat, out_feat)

    def forward(self, inputs):
        outputs = self.conv1(inputs)
        outputs = self.conv2(outputs)
        return outputs


class Conv_Block(nn.Module):
    def __init__(self, in_feat, out_feat):
        super(Conv_Block, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_feat, out_feat, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
        )

    def forward(self, inputs):
        outputs = self.conv1(inputs)
        return outputs


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
    )


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class Res_Block(nn.Module):
    def __init__(self, inplanes, planes, stride=1):
        super(Res_Block, self).__init__()
        self.conv_input = conv1x1(inplanes, planes)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.conv3 = conv1x1(planes, planes)
        self.stride = stride

    def forward(self, x):
        residual = self.conv_input(x)

        out = self.conv1(x)
        out = self.bn(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn(out)

        out += residual
        out = self.relu(out)

        return out


class Res_Block_identity(nn.Module):
    def __init__(self, inplanes, planes, stride=1):
        super(Res_Block_identity, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.conv3 = conv1x1(planes, planes)
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn(out)

        out += residual
        out = self.relu(out)

        return out


class UpConcat(nn.Module):
    def __init__(self, in_feat, out_feat):
        super(UpConcat, self).__init__()

        self.de_conv = nn.ConvTranspose2d(in_feat, out_feat, kernel_size=2, stride=2)

    def forward(self, inputs, down_outputs):
        outputs = self.de_conv(inputs)
        out = torch.cat([down_outputs, outputs], 1)
        return out


class Res_UNet(nn.Module):
    def __init__(self, num_channels=1, num_classes=1):
        super(Res_UNet, self).__init__()
        flt = 64
        self.down1 = Encode_Block(num_channels, flt)
        self.down2 = Encode_Block(flt, flt * 2)
        self.down3 = Encode_Block(flt * 2, flt * 4)
        self.down4 = Encode_Block(flt * 4, flt * 8)
        self.down_pool = nn.MaxPool2d(kernel_size=2)
        self.bottom = Encode_Block(flt * 8, flt * 16)
        self.up_cat1 = UpConcat(flt * 16, flt * 8)
        self.up_conv1 = Decode_Block(flt * 16, flt * 8)
        self.up_cat2 = UpConcat(flt * 8, flt * 4)
        self.up_conv2 = Decode_Block(flt * 8, flt * 4)
        self.up_cat3 = UpConcat(flt * 4, flt * 2)
        self.up_conv3 = Decode_Block(flt * 4, flt * 2)
        self.up_cat4 = UpConcat(flt * 2, flt)
        self.up_conv4 = Decode_Block(flt * 2, flt)
        self.final = nn.Sequential(
            nn.Conv2d(flt, num_classes, kernel_size=1), nn.Sigmoid()
        )

    def forward(self, inputs):
        down1_feat = self.down1(inputs)
        pool1_feat = self.down_pool(down1_feat)
        down2_feat = self.down2(pool1_feat)
        pool2_feat = self.down_pool(down2_feat)
        down3_feat = self.down3(pool2_feat)
        pool3_feat = self.down_pool(down3_feat)
        down4_feat = self.down4(pool3_feat)
        pool4_feat = self.down_pool(down4_feat)

        bottom_feat = self.bottom(pool4_feat)

        up1_feat = self.up_cat1(bottom_feat, down4_feat)
        up1_feat = self.up_conv1(up1_feat)
        up2_feat = self.up_cat2(up1_feat, down3_feat)
        up2_feat = self.up_conv2(up2_feat)
        up3_feat = self.up_cat3(up2_feat, down2_feat)
        up3_feat = self.up_conv3(up3_feat)
        up4_feat = self.up_cat4(up3_feat, down1_feat)
        up4_feat = self.up_conv4(up4_feat)

        outputs = self.final(up4_feat)

        return outputs

The model fails to segment anything at all after model.eval() is set, but works well once model.eval() is removed. I am confused by this; is model.eval() necessary at test time?

Tags: python, deep-learning, pytorch, image-segmentation

Solution


BatchNorm layers keep running estimates of the mean and variance of their inputs during training (model.train()), and those estimates are then used for normalization during evaluation (model.eval()).
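
As a minimal sketch (my illustration, not part of the original answer), the snippet below shows a BatchNorm layer updating its running statistics in train mode and freezing them in eval mode:

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(3)
x = torch.randn(8, 3, 16, 16) * 5 + 2  # batch with non-zero mean, non-unit variance

bn.train()
_ = bn(x)               # updates bn.running_mean / bn.running_var via the momentum rule
print(bn.running_mean)  # has moved from 0 toward the batch mean

bn.eval()
_ = bn(x)               # normalizes with the stored running statistics instead
print(bn.running_mean)  # unchanged: eval mode does not update the statistics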

Each layer keeps its own statistics for the mean and variance of its outputs/activations. Because you reuse the same BatchNorm layer (self.bn = nn.BatchNorm2d(planes)) multiple times, its running statistics get mixed together and no longer represent the actual mean and variance at any one of those call sites. You should therefore create a separate BatchNorm layer for each place it is used, as sketched below.
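
Here is a sketch of that fix applied to the question's Res_Block (the bn1/bn2/bn3 names are mine; conv1x1 and conv3x3 are the helpers defined in the question):

import torch.nn as nn

class Res_Block(nn.Module):
    def __init__(self, inplanes, planes, stride=1):
        super(Res_Block, self).__init__()
        self.conv_input = conv1x1(inplanes, planes)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)  # one BatchNorm per normalization site
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes)
        self.bn3 = nn.BatchNorm2d(planes)
        self.relu = nn.LeakyReLU(inplace=True)
        self.stride = stride

    def forward(self, x):
        residual = self.conv_input(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += residual
        return self.relu(out)

Res_Block_identity needs the same change.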

Edit: I just read that your batch_size is 1, which may also be the core of your problem: see Tensorflow and Batch Normalization with Batch Size==1 => Outputs all zeros
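
If you cannot increase the batch size, one common workaround (my suggestion, not part of the linked answer) is to replace BatchNorm2d with GroupNorm, which computes statistics per sample rather than per batch, so a batch size of 1 is fine and train and eval modes behave identically:

import torch.nn as nn

def norm2d(planes, num_groups=8):
    # GroupNorm keeps no running statistics; it normalizes over groups of
    # channels within each sample (planes must be divisible by num_groups).
    return nn.GroupNorm(num_groups=num_groups, num_channels=planes)

# e.g. in Res_Block.__init__:
# self.bn1 = norm2d(planes)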

