Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64

Problem Description

def build_model(dropout=0.2, lstm_units=200, fc_hidden=100):
    # prepare data
    train_x, train_y = to_supervised(train, n_input)
    # define parameters
    verbose, epochs, batch_size = 0, 30, 24
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
    # reshape output into [samples, timesteps, features]
    train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    # define model
    model = Sequential()
    model.add(LSTM(lstm_units, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(Dropout(dropout))
    model.add(RepeatVector(n_outputs))
    model.add(LSTM(lstm_units, activation='relu', return_sequences=True))
    model.add(Dropout(dropout))
    model.add(TimeDistributed(Dense(fc_hidden, activation='relu')))
    model.add(TimeDistributed(Dense(1)))
    model.compile(loss='mse', optimizer='adam')
#   early_stop = EarlyStopping(monitor='loss', patience=10, verbose=0)
    # fit network
    history=model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose, validation_split=0.1, shuffle=False)
    return history.history['loss'][-1]
def bayesian_opt():
    optimizer = BayesianOptimization(
        f=build_model,
        pbounds={'dropout': (0.0, 0.5), 'lstm_units': (32, 250), 'fc_hidden': (32, 256),},
    )

    optimizer.maximize(
        init_points=10,
        n_iter=30,
    )

bayesian_opt()

Traceback (most recent call last):

  File "<ipython-input-75-7ba49e57a6c9>", line 13, in <module>
    bayesian_opt()

  File "<ipython-input-75-7ba49e57a6c9>", line 9, in bayesian_opt
    n_iter=30,

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\bayes_opt\bayesian_optimization.py", line 174, in maximize
    self.probe(x_probe, lazy=False)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\bayes_opt\bayesian_optimization.py", line 112, in probe
    self._space.probe(params)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\bayes_opt\target_space.py", line 194, in probe
    target = self.target_func(**params)

  File "<ipython-input-74-db3bb0c0b2de>", line 11, in build_model
    model.add(LSTM(lstm_units, activation='relu', input_shape=(n_timesteps, n_features)))

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\engine\sequential.py", line 166, in add
    layer(x)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\layers\recurrent.py", line 536, in __call__
    return super(RNN, self).__call__(inputs, **kwargs)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 463, in __call__
    self.build(unpack_singleton(input_shapes))

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\layers\recurrent.py", line 497, in build
    self.cell.build(step_input_shape)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\layers\recurrent.py", line 1914, in build
    constraint=self.kernel_constraint)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 279, in add_weight
    weight = K.variable(initializer(shape, dtype=dtype),

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\initializers.py", line 227, in __call__
    dtype=dtype, seed=self.seed)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py", line 4357, in random_uniform
    shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\keras\backend.py", line 5253, in random_uniform
    shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\ops\random_ops.py", line 247, in random_uniform
    rnd = gen_random_ops.random_uniform(shape, dtype, seed=seed1, seed2=seed2)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_random_ops.py", line 858, in random_uniform
    name=name)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 626, in _apply_op_helper
    param_name=input_name)

  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 60, in _SatisfiesTypeConstraint
    ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))

TypeError: Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64

train and n_input are global variables.

Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64

I tried adjusting all of the related shapes, but it didn't work.

What is wrong with the shape?

def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
  if attr_def.HasField("allowed_values"):
    allowed_list = attr_def.allowed_values.list.type
    if dtype not in allowed_list:
      raise TypeError(
          "Value passed to parameter '%s' has DataType %s not in list of "
          "allowed values: %s" %
          (param_name, dtypes.as_dtype(dtype).name,
           ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))

This is the _SatisfiesTypeConstraint(dtype, attr_def, param_name) function (from op_def_library.py in the traceback above).
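For context on where the float comes from: BayesianOptimization samples every parameter in pbounds as a floating-point number, so lstm_units and fc_hidden reach build_model as floats and flow into the kernel shape that the LSTM layer builds (the shape seen in the traceback). A quick way to confirm this (a minimal sketch, not part of the original question; inspect_params is a hypothetical helper) is to print the types the optimizer passes to its target function:

from bayes_opt import BayesianOptimization

# Hypothetical probe: show that bayes_opt always passes floating-point
# values, even for parameters that must be integers (layer sizes).
def inspect_params(dropout, lstm_units, fc_hidden):
    print(type(lstm_units), lstm_units)  # a floating-point value, never an int
    return 0.0

probe = BayesianOptimization(
    f=inspect_params,
    pbounds={'dropout': (0.0, 0.5), 'lstm_units': (32, 250), 'fc_hidden': (32, 256)},
)
probe.maximize(init_points=1, n_iter=0)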

Tags: python, tensorflow, keras

Solution


There seems to be a problem with the data type.

When dealing with these problems, you may want to use tf.cast to convert the float values to integers.

Please refer to the following code.

import tensorflow as tf

x = tf.constant([1.8, 2.2], dtype=tf.float32)
tf.dtypes.cast(x, tf.int32)  # [1, 2], dtype=tf.int32

You can also refer to the official TensorFlow documentation here.
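Applied to the code in the question, the same idea (a minimal sketch, assuming the float hyperparameters sampled by BayesianOptimization are the source of the float32 shape) is to convert them to integers at the top of build_model. Since they arrive as plain Python/NumPy floats rather than tensors, int() is enough here instead of tf.cast:

def build_model(dropout=0.2, lstm_units=200, fc_hidden=100):
    # bayes_opt samples every hyperparameter as a float; layer sizes must be
    # integers, so round and cast them before building any layers.
    lstm_units = int(round(lstm_units))
    fc_hidden = int(round(fc_hidden))
    # ... rest of the function unchanged ...

With integer layer sizes, the shape tuple passed to the weight initializer should contain only ints, which satisfies the int32/int64 constraint checked by _SatisfiesTypeConstraint.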

