python - 使用自定义损失函数时出现 TensorFlow 错误
问题描述
这是我收到的错误消息:
断言失败:[10 250 250] [11] [[node ssim_loss/SSIM/Assert_2/Assert(定义在 D:\Documents\neural_network\image_upscaler\nn2.py:33)]] [Op:__inference_train_function_7302]
函数调用栈:train_function
这是我的代码
def ssim_loss(y_true, y_pred):
    """SSIM-based training loss: ``1 - mean(SSIM)``.

    SSIM is a *similarity* score (1.0 = identical images), so the raw
    mean must be subtracted from 1 — returning SSIM directly would make
    the optimizer minimize similarity between prediction and target.

    Args:
        y_true: ground-truth image batch.
        y_pred: predicted image batch (same shape as ``y_true``).

    Returns:
        Scalar float32 tensor in ``[0, 2]``; 0 means identical images.

    NOTE(review): ``max_val=2.0`` assumes the images span a dynamic
    range of 2 (e.g. values in [-1, 1]) — confirm against the data
    pipeline, which is not visible here.
    """
    y_pred = tf.cast(y_pred, tf.float32)
    y_true = tf.cast(y_true, tf.float32)
    return 1.0 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, 2.0))
def relu_bn(inputs):
    """Apply a ReLU activation and then batch-normalize the result."""
    return BatchNormalization()(ReLU()(inputs))
def build_model_shi(upscale_factor=2, channels=1):
    """Build and compile a sub-pixel (depth-to-space) super-resolution CNN.

    Args:
        upscale_factor: spatial upscaling ratio applied by depth_to_space.
        channels: number of image channels (1 = grayscale).

    Returns:
        A compiled ``keras.Model`` mapping (H, W, channels) inputs to
        (H * upscale_factor, W * upscale_factor, channels) outputs,
        trained with the custom ``ssim_loss`` and Adam(lr=0.001).
    """
    common = dict(
        activation="relu",
        kernel_initializer="Orthogonal",
        padding="same",
    )
    inputs = keras.Input(shape=(None, None, channels))
    x = inputs
    # Feature extraction, then projection to channels * r^2 feature maps
    # so depth_to_space can rearrange depth into an r-times larger image.
    for filters, kernel in (
        (128, 5),
        (128, 3),
        (32, 3),
        (channels * (upscale_factor ** 2), 3),
    ):
        x = layers.Conv2D(filters, kernel, **common)(x)
    outputs = tf.nn.depth_to_space(x, upscale_factor)
    model = keras.Model(inputs, outputs)
    # Previously trained with keras.losses.MeanSquaredError().
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.001),
        loss=ssim_loss,
    )
    return model
def load_data(PATH):
    """Load the four pre-split dataset arrays from *PATH* as float16.

    Expects ``PATH`` to end with a path separator (paths are built by
    plain string concatenation) and the files ``xtrf.npy``, ``xtef.npy``,
    ``ytrf.npy`` and ``ytef.npy`` to exist there.

    Returns:
        Tuple ``(xtr, xte, ytr, yte)`` of float16 numpy arrays.
    """
    start = time()
    # Cast with .astype("float16") directly: the original loaded each
    # array and then re-copied it via np.array(..., "float16"), holding
    # two full-precision copies in memory at once.
    xte = np.load(PATH + "xtef.npy").astype("float16")
    print("xte loaded")
    xtr = np.load(PATH + "xtrf.npy").astype("float16")
    print("xtr loaded")
    ytr = np.load(PATH + "ytrf.npy").astype("float16")
    print("ytr loaded")
    yte = np.load(PATH + "ytef.npy").astype("float16")
    print("yte loaded")
    end = time()
    print("loaded in", end - start, "seconds")
    return xtr, xte, ytr, yte
def train(PATH, BATCH_SIZE, EPOCHS):
    """Train the super-resolution model and dump its outputs to disk.

    Side effects under ``PATH``: writes ``model.hdf5`` (best checkpoint),
    ``model_op.npy`` (predictions on the test split), ``model_ip.npy``
    (test inputs) and ``model_gt.npy`` (test ground truth).
    """
    xtr, xte, ytr, yte = load_data(PATH)
    # BUG FIX: the model is compiled without any metrics, so
    # 'val_accuracy' is never produced and a checkpoint monitoring it
    # (mode='max') would never save. Monitor validation loss instead,
    # matching the EarlyStopping callback below.
    checkpoint = ModelCheckpoint(
        PATH + "model.hdf5",
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
    )
    callbacks_list = [
        checkpoint,
        EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001),
    ]
    model = build_model_shi()
    history = model.fit(
        xtr, ytr,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        validation_data=(xte, yte),
        callbacks=callbacks_list,
    )
    results = model.predict(xte)
    np.save(PATH + "model_op", results)
    np.save(PATH + "model_ip", xte)
    np.save(PATH + "model_gt", yte)
    # Release the large arrays before anything else is allocated.
    del xtr, xte, ytr, yte


train(PATH, BATCH_SIZE, EPOCHS)
当我使用 MSE 作为损失函数时,该模型工作得很好。但是当我把自己写的“ssim_loss”用作损失函数时,就出现了上面这个错误。感谢任何帮助。
解决方案
我找到原因了!
原来,传入 ssim 函数的张量需要先调整形状(reshape)。
def ssim_loss(y_true, y_pred):
    """SSIM loss for fixed-size 250x250 single-channel images.

    Both tensors are reshaped to (batch, 250, 250, 1) before SSIM is
    computed, and the loss is ``1 - mean(SSIM)`` so that minimizing it
    maximizes structural similarity. ``max_val=255`` treats pixel
    values as 8-bit intensities.
    """
    target_shape = [-1, 250, 250, 1]
    y_true = tf.reshape(y_true, target_shape)
    y_pred = tf.reshape(y_pred, target_shape)
    similarity = tf.image.ssim(y_true, y_pred, max_val=255)
    return 1 - tf.reduce_mean(similarity)
在我的情况下,图像尺寸为 250x250x1
推荐阅读
- cordova - ionic4中的报警应用
- database - 访问 - 选择查询或交叉表?
- java - 如何在 openshift 上正确部署 quarkus
- html - 使用 Symfony 渲染 Twig 并将其插入另一个 Twig
- c# - 统一无法识别 Visual Studio
- django - AWS Elastic Beanstalk:WSGI 路径不正确?
- java - 使用 Maven 管理 Eclipse 插件项目的依赖项
- mariadb - 使用 CTE 和动态 SQL 时 MariaDB 中出现 SQL 错误 (1146)。我究竟做错了什么?
- java - Spring AMQP 在队列 Bean 上设置 prefetchCount
- angular - 运行 docker build 时 Npm run build 失败