keras - keras reshape 中输入维度的错误
问题描述
我正在尝试在 R 的 Keras 中,把尺寸为 (1, 512, 512, 9) 的张量 `output` 重塑为尺寸为 (1, 512*512, 9) 的张量 `output_reshaped`。这本应是件非常简单的事,但不知为何却行不通。
这是我正在使用的代码:
# Composes a Reshape layer onto the tensor `output` (shape (batch, 512, 512, 9)),
# flattening the two spatial axes to (batch, 512*512, 9).
# NOTE(review): the ValueError quoted below is raised when checking the *target*
# array y, not this layer — y must also be reshaped to (1, w*h, class) before
# calling fit(); the layer itself is fine.
output_reshaped = layer_reshape(output ,target_shape = c( w*h, class) )
当我尝试拟合模型时,会弹出以下错误
Error in py_call_impl(callable, dots$args, dots$keywords) :
ValueError: Error when checking target: expected reshape_16 to have 3 dimensions, but got array with shape (1, 512, 512, 9)
这里出了什么问题?根据 keras 文档,这个函数应该能够处理任意输入形状?
参数
# Libraries and hyper-parameters for the segmentation experiment -----------
library(keras)
library(EBImage)
library(raster)

epochs     <- 280
batch_size <- 1L
parts      <- 4
h          <- 512L  # tile height in px (full image is 2448 px)
w          <- 512L  # tile width in px
channels   <- 3L    # RGB input
class      <- 9L    # number of segmentation classes
pick       <- 100L

# Location of the training index (images + label paths).
path  <- '/media/daniel/6DA3F120567E843D/schelde'
train <- readRDS(file.path(path, 'train.rds'))
完整模型
# U-Net style encoder/decoder ----------------------------------------------
# Encoder: 6 down-sampling stages (max-pool /2 each); decoder: 6 transposed
# convolutions (x2 each) with skip connections concatenated from the encoder.
# Output: per-pixel softmax over `class` classes, flattened to (w*h, class)
# so categorical_crossentropy can be applied per pixel.
input_img <- layer_input(shape = c(w, h, channels))
#background_mask = layer_input(shape = c(w, h, channels))

# --- Encoder --------------------------------------------------------------
# NOTE: `filters=` spelled out (original used `filter=`, which only worked
# via R's partial argument matching).
l0 <- layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(input_img)
l0 <- layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l0)
l1 <- layer_max_pooling_2d(pool_size = c(2, 2))(l0)
l1 <- layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1)
l1 <- layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1)
l2 <- layer_max_pooling_2d(pool_size = c(2, 2))(l1)
l2 <- layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2)
l2 <- layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2)
l3 <- layer_max_pooling_2d(pool_size = c(2, 2))(l2)
l3 <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3)
l3 <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3)
l4 <- layer_max_pooling_2d(pool_size = c(2, 2))(l3)
l4 <- layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4)
l4 <- layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4)
l5 <- layer_max_pooling_2d(pool_size = c(2, 2))(l4)
l5 <- layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5)
l5 <- layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5)
l6 <- layer_max_pooling_2d(pool_size = c(2, 2))(l5)
l6 <- layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l6)
l6 <- layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l6)

# --- Decoder --------------------------------------------------------------
l5_up <- layer_conv_2d_transpose(filters = 512, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l6)
l5_up <- layer_concatenate(list(l5, l5_up))
l5_up <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5_up)
l5_up <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5_up)
# BUG FIX: original upsampled `l5` (the raw encoder feature map) instead of
# `l5_up`, silently discarding the refined decoder features computed above.
# Shapes matched either way, so this never raised an error.
l4_up <- layer_conv_2d_transpose(filters = 512, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l5_up)
l4_up <- layer_concatenate(list(l4, l4_up))
l4_up <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4_up)
l4_up <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4_up)
# BUG FIX: same issue — original upsampled `l4` instead of `l4_up`.
l3_up <- layer_conv_2d_transpose(filters = 512, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l4_up)
l3_up <- layer_concatenate(list(l3, l3_up))
l3_up <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3_up)
l3_up <- layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3_up)
l2_up <- layer_conv_2d_transpose(filters = 256, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l3_up)
l2_up <- layer_concatenate(list(l2, l2_up))
l2_up <- layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2_up)
l2_up <- layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2_up)
l1_up <- layer_conv_2d_transpose(filters = 128, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l2_up)
l1_up <- layer_concatenate(list(l1, l1_up))
l1_up <- layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1_up)
l1_up <- layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1_up)
l0_up <- layer_conv_2d_transpose(filters = 64, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l1_up)
l0_up <- layer_concatenate(list(l0, l0_up))
l0_up <- layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l0_up)
l0_up <- layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l0_up)

# Per-pixel class scores, then flatten the spatial axes so each pixel becomes
# one categorical sample: (batch, w, h, class) -> (batch, w*h, class).
output <- layer_conv_2d(filters = class, kernel_size = c(1, 1), padding = "same", activation = 'softmax')(l0_up)
# FIX: dropped `batch_input_shape` — it is only meaningful when a layer is the
# first layer of a model; when composing onto an existing tensor it is invalid
# noise. The reported ValueError comes from the *target* y array, not from here.
output_reshaped <- layer_reshape(output, target_shape = c(w * h, class))

model <- keras_model(inputs = input_img, outputs = output_reshaped)
opt <- optimizer_adam(lr = 0.0001, decay = 0, clipnorm = 1)
compile(model, loss = "categorical_crossentropy", optimizer = opt, metrics = "accuracy")
运行模型
# Per-image training step ---------------------------------------------------
# Loads one image, one-hot encodes its label raster, and runs one epoch of fit.
input_im <- readImage(file.path(path, train$images[i]))
input_im <- array(input_im, dim = c(1, dim(input_im)))  # add batch axis

# Labels: raster of class ids (shifted +1 so they index 1..class), then
# one-hot encoded per pixel.
input_lab <- raster::as.matrix(raster(file.path(path, train$labels[i]))) + 1
input_lab <- apply(input_lab, c(1, 2), function(x) {
  z <- rep(0, class)
  z[x] <- 1
  z
})
# apply() puts the one-hot axis first: (class, h, w) -> (h, w, class).
input_lab <- aperm(input_lab, c(2, 3, 1))
input_lab <- array(input_lab, dim = c(1, dim(input_lab)))  # (1, h, w, class)

# FIX for the reported ValueError ("expected reshape_16 to have 3 dimensions,
# but got array with shape (1, 512, 512, 9)"): the model ends in
# layer_reshape(), so its output is (batch, w*h, class), while y was left as
# (1, h, w, class). The target must be flattened the same way. TensorFlow's
# reshape flattens row-major (last axis fastest) whereas R's dim<- is
# column-major (first axis fastest), so swap the two spatial axes before
# collapsing them to keep pixels aligned with the model output.
input_lab <- aperm(input_lab, c(1, 3, 2, 4))  # (1, w, h, class)
dim(input_lab) <- c(1, w * h, class)          # (1, w*h, class)

model$fit(x = input_im, y = input_lab, batch_size = batch_size, epochs = 1L)
解决方案
推荐阅读
- javascript - javascript将数组作为值添加到对象
- java - Aws Lambda -“errorMessage”:“没有名为 'entityManagerFactory' 的 bean 可用”
- python - 尝试修复 TypeError:freqRolls() 缺少 1 个必需的位置参数:'sides' python
- performance - 更快的迭代
- angular - ionic3 谷歌地图未显示在导航堆栈中
- c# - 坚持如何为 NumberList 创建 DeleteBelow/Above 和 CountBelow/Above 方法
- python - 为什么地理编码器(1.38.1)输出无?
- mysql - MySQL ---从多个表中选择的显式内部连接
- python - 如果随机游走在随机初始化值的范围内,则停止条件
- python - 创建一个函数 generateString(char, val),它返回一个字符串,其中 val 个 char 字符连接在一起