首页 > 解决方案 > ModuleNotFoundError:没有名为“custom_layers”的模块

问题描述

我正在设计segnet。运行代码时,我在 MaxUnpooling2D 中遇到错误

InvalidArgumentError: indices (shape=[6422528,1]) 的维度 [0,1) 必须与 updates (shape=[12845056]) 的维度 [0,1) 匹配 [[node model_5/max_unpooling2d_28/ScatterNd (定义在 C:\spyder\codes\custom_layers\layers.py:65) ]] [Op:__inference_train_function_29658]

函数调用栈:train_function

如何清除这个?

# SegNet-style encoder/decoder for binary segmentation.
#
# Fixes relative to the original paste:
#  * the rescaling Lambda output `s` was computed but never used — the first
#    encoder conv now reads `s` instead of the raw `inputs`;
#  * each decoder block now ends with a conv that reduces its channel count
#    to that of the pooling mask it is unpooled with.  MaxUnpooling2D
#    scatters `updates` using the mask's flattened indices, so both tensors
#    must have identical shapes; the old channel mismatch (e.g. conv_7 with
#    128 channels vs mask_1 with 64) is exactly what raised the
#    "indices ... must match updates ..." InvalidArgumentError during fit;
#  * the output head previously built (and discarded) two intermediate
#    sigmoid activations — internal activations are now ReLU, with a single
#    sigmoid applied to the final 1-channel conv.
SIZE = 224
channels = 4
output_mode = "sigmoid"
num_filters = 64

inputs = tf.keras.layers.Input((SIZE, SIZE, channels))
s = tf.keras.layers.Lambda(lambda X: X / 255)(inputs)  # scale pixels to [0, 1]

# ---------------- encoder ----------------
conv_1 = Convolution2D(num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(s)
conv_1 = BatchNormalization()(conv_1)
conv_1 = Activation("relu")(conv_1)
conv_1 = Convolution2D(num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_1)
conv_1 = BatchNormalization()(conv_1)
conv_1 = Activation("relu")(conv_1)
pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv_1)   # 224 -> 112, 64 ch

conv_2 = Convolution2D(2 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(pool_1)
conv_2 = BatchNormalization()(conv_2)
conv_2 = Activation("relu")(conv_2)
conv_2 = Convolution2D(2 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_2)
conv_2 = BatchNormalization()(conv_2)
conv_2 = Activation("relu")(conv_2)
pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv_2)   # 112 -> 56, 128 ch

conv_3 = Convolution2D(4 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(pool_2)
conv_3 = BatchNormalization()(conv_3)
conv_3 = Activation("relu")(conv_3)
conv_3 = Convolution2D(4 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_3)
conv_3 = BatchNormalization()(conv_3)
conv_3 = Activation("relu")(conv_3)
conv_3 = Convolution2D(4 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_3)
conv_3 = BatchNormalization()(conv_3)
conv_3 = Activation("relu")(conv_3)
pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv_3)   # 56 -> 28, 256 ch

conv_4 = Convolution2D(8 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(pool_3)
conv_4 = BatchNormalization()(conv_4)
conv_4 = Activation("relu")(conv_4)
conv_4 = Convolution2D(8 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_4)
conv_4 = BatchNormalization()(conv_4)
conv_4 = Activation("relu")(conv_4)
conv_4 = Convolution2D(8 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_4)
conv_4 = BatchNormalization()(conv_4)
conv_4 = Activation("relu")(conv_4)
pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size=(2, 2))(conv_4)   # 28 -> 14, 512 ch

# ---------------- decoder ----------------
# pool_4 and mask_4 both have 8*num_filters channels — shapes already match.
unpool_1 = MaxUnpooling2D(size=(2, 2))([pool_4, mask_4])            # 14 -> 28

conv_5 = Convolution2D(8 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(unpool_1)
conv_5 = BatchNormalization()(conv_5)
conv_5 = Activation("relu")(conv_5)
conv_5 = Convolution2D(8 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_5)
conv_5 = BatchNormalization()(conv_5)
conv_5 = Activation("relu")(conv_5)
# Reduce to 4*num_filters so conv_5's shape matches mask_3 for unpooling.
conv_5 = Convolution2D(4 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_5)
conv_5 = BatchNormalization()(conv_5)
conv_5 = Activation("relu")(conv_5)

unpool_2 = MaxUnpooling2D(size=(2, 2))([conv_5, mask_3])            # 28 -> 56

conv_6 = Convolution2D(4 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(unpool_2)
conv_6 = BatchNormalization()(conv_6)
conv_6 = Activation("relu")(conv_6)
conv_6 = Convolution2D(4 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_6)
conv_6 = BatchNormalization()(conv_6)
conv_6 = Activation("relu")(conv_6)
# Reduce to 2*num_filters so conv_6's shape matches mask_2 for unpooling.
conv_6 = Convolution2D(2 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_6)
conv_6 = BatchNormalization()(conv_6)
conv_6 = Activation("relu")(conv_6)

unpool_3 = MaxUnpooling2D(size=(2, 2))([conv_6, mask_2])            # 56 -> 112

conv_7 = Convolution2D(2 * num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(unpool_3)
conv_7 = BatchNormalization()(conv_7)
conv_7 = Activation("relu")(conv_7)
# Reduce to num_filters so conv_7's shape matches mask_1 for unpooling.
conv_7 = Convolution2D(num_filters, (3, 3), padding="same", kernel_initializer='he_uniform')(conv_7)
conv_7 = BatchNormalization()(conv_7)
conv_7 = Activation("relu")(conv_7)

unpool_4 = MaxUnpooling2D(size=(2, 2))([conv_7, mask_1])            # 112 -> 224

# ---------------- output head ----------------
conv_8 = Convolution2D(num_filters, (1, 1), padding="same", kernel_initializer='he_uniform')(unpool_4)
conv_8 = BatchNormalization()(conv_8)
conv_8 = Activation("relu")(conv_8)
conv_8 = Convolution2D(num_filters, (1, 1), padding="same", kernel_initializer='he_uniform')(conv_8)
conv_8 = BatchNormalization()(conv_8)
conv_8 = Activation("relu")(conv_8)
conv_8 = Convolution2D(1, (1, 1), padding="same", kernel_initializer='he_uniform')(conv_8)
conv_8 = BatchNormalization()(conv_8)
outputs = Activation(output_mode)(conv_8)  # single sigmoid on the 1-channel map

model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()

# results = model.fit(X_train, y_train, validation_split=0.1, batch_size=16, epochs=25)

hist1 = model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=32, epochs=5, verbose=2)


import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer


class MaxPoolingWithArgmax2D(Layer):
    """Max pooling that also returns the argmax indices of the pooled values.

    Intended to be paired with ``MaxUnpooling2D`` (SegNet-style): the decoder
    uses the returned indices to place each value back at the position it was
    pooled from.

    Returns ``[pooled, argmax]``; both have the pooled spatial shape.
    ``argmax`` holds flattened indices cast to the Keras float dtype.
    """

    def __init__(
            self,
            pool_size=(2, 2),
            strides=(2, 2),
            padding='same',
            **kwargs):
        # pool_size/strides: (rows, cols); padding: 'same' or 'valid'.
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides

    def call(self, inputs, **kwargs):
        # NHWC pooling; ksize/strides get the leading batch and trailing
        # channel dimensions set to 1.
        ksize = [1, *self.pool_size, 1]
        strides = [1, *self.strides, 1]
        output, argmax = tf.nn.max_pool_with_argmax(
            inputs,
            ksize=ksize,
            strides=strides,
            padding=self.padding.upper())

        # NOTE(review): casting int64 indices to float32 loses precision for
        # indices >= 2**24, which large feature maps can exceed — consider
        # keeping an integer dtype; confirm downstream usage first.
        argmax = K.cast(argmax, K.floatx())
        return [output, argmax]

    def compute_output_shape(self, input_shape):
        # Fix: derive the downsampling ratio from the configured strides
        # instead of the previous hard-coded (1, 2, 2, 1), so non-default
        # strides report the correct static shape.
        ratio = (1, self.strides[0], self.strides[1], 1)
        output_shape = tuple(
            dim // ratio[idx] if dim is not None else None
            for idx, dim in enumerate(input_shape))
        # Same shape for both outputs (pooled values and their indices).
        return [output_shape, output_shape]

    def compute_mask(self, inputs, mask=None):
        # Two outputs; neither propagates a Keras mask.
        return [None, None]

    def get_config(self):
        # Make the layer serializable with model.save()/load_model().
        config = super(MaxPoolingWithArgmax2D, self).get_config()
        config.update({
            'pool_size': self.pool_size,
            'strides': self.strides,
            'padding': self.padding,
        })
        return config


class MaxUnpooling2D(Layer):
    """Upsample by scattering values back to their pooled (argmax) positions.

    Expects ``inputs == [updates, mask]`` where ``mask`` is the (float-cast)
    argmax tensor produced by ``MaxPoolingWithArgmax2D``.  ``updates`` and
    ``mask`` must have identical shapes: ``tf.scatter_nd`` pairs the flattened
    indices with the flattened updates element-wise, so any mismatch (e.g. a
    different channel count) raises an InvalidArgumentError of the form
    "indices ... must match updates ...".
    """

    def __init__(self, size=(2, 2), **kwargs):
        # size: spatial upsampling factor (rows, cols); should mirror the
        # pool_size used by the matching encoder pooling layer.
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.size = size

    def call(self, inputs, output_shape=None):
        # Split the two-tensor input; the mask arrives as floats (see the
        # pooling layer) and is cast back to integer indices here.
        updates, mask = inputs[0], inputs[1]
        mask = K.cast(mask, 'int32')
        # Dynamic NHWC shape of the updates tensor.
        input_shape = tf.shape(updates, out_type='int32')

        if output_shape is None:
            # Default target shape: same batch/channels, spatial dims scaled
            # by self.size.
            output_shape = (
                input_shape[0],
                input_shape[1] * self.size[0],
                input_shape[2] * self.size[1],
                input_shape[3])

        # Scatter every update into a flat buffer at its argmax index.
        # NOTE(review): tf.nn.max_pool_with_argmax flattens indices within
        # each batch element by default (include_batch_in_index=False), so
        # for batch > 1 the indices of different samples appear to collide in
        # this flat buffer (scatter_nd sums duplicates) — confirm against the
        # pooling layer's settings.
        ret = tf.scatter_nd(K.expand_dims(K.flatten(mask)),
                            K.flatten(updates),
                            [K.prod(output_shape)])

        # Reshape back to NHWC using the statically known update shape;
        # -1 keeps the batch dimension dynamic.
        input_shape = updates.shape
        out_shape = [-1,
                     input_shape[1] * self.size[0],
                     input_shape[2] * self.size[1],
                     input_shape[3]]
        return K.reshape(ret, out_shape)

    def compute_output_shape(self, input_shape):
        # input_shape is [updates_shape, mask_shape]; the output keeps the
        # mask's batch/channels with spatial dims scaled by self.size.
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.size[0],
            mask_shape[2] * self.size[1],
            mask_shape[3]
        )

标签: python, tensorflow, model, layer

解决方案


推荐阅读