TensorFlow 2.1.0: _FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors

Problem description

I am writing a custom loss function that involves another, already trained neural network (called model_weight) evaluating predictions on the inputs of the model I am currently training:
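
For reference, the snippets below assume the usual TF 2.1 Keras API, i.e. roughly these imports:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model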

def weight(d):
  # f/(1-f): odds computed from the pre-trained model's prediction
  f = model_weight.predict(d, steps=1)  # this predict() call is where the error below occurs
  return f / (1 - f)

myinputs = Input(shape=(1,), dtype = tf.float32)
x = Dense(128, activation='relu')(myinputs)
x2 = Dense(128, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x2)
model = Model(inputs=myinputs, outputs=predictions)
model.summary()

def my_loss_wrapper(inputs,val=0):
  x  = inputs
  theta = 0. #starting value
  #theta0 = tf.constant(val, dtype= tf.float32)#target value

  #creating tensor (filled with 1s) with same shape as inputs; multiply by val
  theta0_stack = K.ones_like(x, dtype=tf.float32)*val 

  #combining and reshaping into correct format:
  data = K.stack((x, theta0_stack), axis=-1) 
  data = K.squeeze(data, axis = 1)
  #slice data to 500 entries to match batch_size
  data = K.gather(data, np.arange(500))
  print(data.shape)

  w = weight(data)

  def my_loss(y_true,y_pred):
    t_loss = K.mean(y_true*(y_true - y_pred)**2+(w)**2*(1.-y_true)*(y_true - y_pred)**2)
    return t_loss

  return my_loss

# theta comes from an outer loop over target values ('for theta in thetas', visible in the traceback below)
model.compile(optimizer='adam', loss=my_loss_wrapper(myinputs, theta), metrics=['accuracy'])
model.fit(np.array(X_train), y_train, epochs=1, batch_size=500, validation_data=(np.array(X_test), y_test), verbose=1)

The error occurs when weight() calls model_weight.predict on data. The function works fine on ordinary tensors that I initialize with concrete values, for example:

tf.Tensor(
[[1 5]
 [2 5]
 [3 5]], shape=(3, 2), dtype=int32))
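
For concreteness, a minimal sketch of that working case (the name test and the executing_eagerly() check also show up in the traceback below; the input shape model_weight actually expects is an assumption):

test = tf.constant([[1, 5],
                    [2, 5],
                    [3, 5]])       # a concrete EagerTensor, shape (3, 2)
print(tf.executing_eagerly())      # True at the top level
print(weight(test))                # model_weight.predict handles this input fine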

However, it fails on tensors built from 'x' or the model's inputs. The error message when the model tries to compile:

_FallbackException                        Traceback (most recent call last)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py in tensor_dataset(components, output_shapes, name)
   5590         _ctx._context_handle, tld.device_name, "TensorDataset", name,
-> 5591         tld.op_callbacks, components, "output_shapes", output_shapes)
   5592       return _result

_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.

During handling of the above exception, another exception occurred:

AttributeError                            Traceback (most recent call last)
<ipython-input-19-f74b0047add5> in <module>
      4 
      5 for theta in thetas:
----> 6     model.compile(optimizer='adam', loss=my_loss_wrapper(myinputs,theta),metrics=['accuracy'])
      7     model.fit(np.array(X_train), y_train, epochs=1, batch_size=500,validation_data=(np.array(X_test), y_test),verbose=1)
      8     lvals+=[model.history.history['val_loss']]

<ipython-input-18-3ac14760abd7> in my_loss_wrapper(inputs, val)
     24     print(tf.executing_eagerly())
     25     print(weight(test))
---> 26     w = weight(data)
     27 
     28     def my_loss(y_true,y_pred):

<ipython-input-14-c58e20e1428a> in weight(d)
     27 '''
     28 def weight(d):
---> 29     f = model_weight.predict(d,steps = 1)
     30     return (f)/(1-f)

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
   1011         max_queue_size=max_queue_size,
   1012         workers=workers,
-> 1013         use_multiprocessing=use_multiprocessing)
   1014 
   1015   def reset_metrics(self):

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in predict(self, model, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing, **kwargs)
    496         model, ModeKeys.PREDICT, x=x, batch_size=batch_size, verbose=verbose,
    497         steps=steps, callbacks=callbacks, max_queue_size=max_queue_size,
--> 498         workers=workers, use_multiprocessing=use_multiprocessing, **kwargs)
    499 
    500 

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in _model_iteration(self, model, mode, x, y, batch_size, verbose, sample_weight, steps, callbacks, max_queue_size, workers, use_multiprocessing, **kwargs)
    424           max_queue_size=max_queue_size,
    425           workers=workers,
--> 426           use_multiprocessing=use_multiprocessing)
    427       total_samples = _get_total_number_of_samples(adapter)
    428       use_sample = total_samples is not None

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in _process_inputs(model, mode, x, y, batch_size, epochs, sample_weights, class_weights, shuffle, steps, distribution_strategy, max_queue_size, workers, use_multiprocessing)
    704       max_queue_size=max_queue_size,
    705       workers=workers,
--> 706       use_multiprocessing=use_multiprocessing)
    707 
    708   return adapter

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, sample_weight_modes, batch_size, epochs, steps, shuffle, **kwargs)
    355     indices_dataset = indices_dataset.flat_map(slice_batch_indices)
    356 
--> 357     dataset = self.slice_inputs(indices_dataset, inputs)
    358 
    359     if shuffle == "batch":

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/data_adapter.py in slice_inputs(self, indices_dataset, inputs)
    381     dataset = dataset_ops.DatasetV2.zip((
    382         indices_dataset,
--> 383         dataset_ops.DatasetV2.from_tensors(inputs).repeat()
    384     ))
    385 

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py in from_tensors(tensors)
    564       Dataset: A `Dataset`.
    565     """
--> 566     return TensorDataset(tensors)
    567 
    568   @staticmethod

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py in __init__(self, element)
   2769     variant_tensor = gen_dataset_ops.tensor_dataset(
   2770         self._tensors,
-> 2771         output_shapes=structure.get_flat_tensor_shapes(self._structure))
   2772     super(TensorDataset, self).__init__(variant_tensor)
   2773 

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py in tensor_dataset(components, output_shapes, name)
   5594       try:
   5595         return tensor_dataset_eager_fallback(
-> 5596             components, output_shapes=output_shapes, name=name, ctx=_ctx)
   5597       except _core._SymbolicException:
   5598         pass  # Add nodes to the TensorFlow graph.

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py in tensor_dataset_eager_fallback(components, output_shapes, name, ctx)
   5627         "'tensor_dataset' Op, not %r." % output_shapes)
   5628   output_shapes = [_execute.make_shape(_s, "output_shapes") for _s in output_shapes]
-> 5629   _attr_Toutput_types, components = _execute.convert_to_mixed_eager_tensors(components, ctx)
   5630   _inputs_flat = list(components)
   5631   _attrs = ("Toutput_types", _attr_Toutput_types, "output_shapes",

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/eager/execute.py in convert_to_mixed_eager_tensors(values, ctx)
    281 def convert_to_mixed_eager_tensors(values, ctx):
    282   v = [ops.convert_to_tensor(t, ctx=ctx) for t in values]
--> 283   types = [t._datatype_enum() for t in v]  # pylint: disable=protected-access
    284   return types, v
    285 

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/eager/execute.py in <listcomp>(.0)
    281 def convert_to_mixed_eager_tensors(values, ctx):
    282   v = [ops.convert_to_tensor(t, ctx=ctx) for t in values]
--> 283   types = [t._datatype_enum() for t in v]  # pylint: disable=protected-access
    284   return types, v
    285 

AttributeError: 'Tensor' object has no attribute '_datatype_enum'

Tags: python, tensorflow

Solution


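A plausible direction, sketched under the assumption that model_weight is an ordinary tf.keras.Model: model.predict() expects eager/NumPy input and internally wraps it in a tf.data.Dataset (the DatasetV2.from_tensors call in the traceback), which is exactly what fails on the symbolic Keras tensor produced by K.stack/K.gather. Calling the trained model directly as a layer keeps the whole weight computation symbolic, so it can live inside the loss graph:

def weight(d):
  # hypothetical fix: call the model instead of model_weight.predict(), so the
  # result stays a symbolic tensor rather than requiring eager/NumPy input
  f = model_weight(d, training=False)
  return f / (1 - f)

For losses that depend on the model's own inputs, another standard option is model.add_loss(), which avoids capturing input-dependent tensors inside the loss closure altogether.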