TensorFlow Model Analysis: TFMA for a Keras model

Problem description

I would like to use TFMA with a Keras model. The Keras model was created with TF 2.0 alpha. It is a pre-trained model with a classification layer added on top:

_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
vgg16 (Model)                (None, 6, 6, 512)         14714688  
_________________________________________________________________
flatten (Flatten)            (None, 18432)             0         
_________________________________________________________________
dense_5 (Dense)              (None, 2)                 36866     

The up_one_dir method is a utility function that copies the checkpoint files up into the model's root folder. These files are then used by export_eval_savedmodel.

The TFX / TFMA code uses the following versions:

TFMA version: 0.13.2
TFDV version: 0.13.1
TF version: 1.13.1

The code is:

# Load model
new_model = keras.models.load_model(model_name)
new_model.summary()

# Convert the keras model to an estimator
estimator_model = tf.keras.estimator.model_to_estimator(keras_model=new_model, model_dir=TF_MODEL_DIR)

# The receiver function for the estimator
def eval_input_receiver_1_fn():
    serialized_tf_example = tf.compat.v1.placeholder(
        dtype=tf.string, shape=[None], name='input_example_placeholder')
    receiver_tensors = {'examples': serialized_tf_example}
    validation_features_columns = [
        tf.feature_column.numeric_column("image", shape=(192, 192)),
        tf.feature_column.categorical_column_with_vocabulary_list(
            "label", ["normal_healthy", "sick"])]
    feature_spec = tf.feature_column.make_parse_example_spec(validation_features_columns)
    features = tf.io.parse_example(serialized_tf_example, feature_spec)
    return tfma.export.EvalInputReceiver(
        features=features,
        receiver_tensors=receiver_tensors,
        labels=features['label'])

import os
import shutil
from pathlib import Path
    
def up_one_dir(path):
    """Copy every file in `path` one directory level up."""
    parent_dir = str(Path(path).parents[0])
    for f in os.listdir(path):
        shutil.copy(os.path.join(path, f), parent_dir)
    # shutil.rmtree(path)

up_one_dir(KERAS_FOLDER)

tfma.export.export_eval_savedmodel(estimator=estimator_model,
                                   export_dir_base=EXPORT_DIR,
                                   eval_input_receiver_fn=eval_input_receiver_1_fn)

The following error, relating to the pre-trained model's features, is raised:

KeyErrorTraceback (most recent call last)
<ipython-input-137-b275096a314a> in <module>()
      1 tfma.export.export_eval_savedmodel(estimator=estimator_model,
      2                                    export_dir_base=EXPORT_DIR,
----> 3                                    eval_input_receiver_fn=eval_input_receiver_1_fn)

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_model_analysis/util.pyc in wrapped_fn(*args, **kwargs)
    171                       (fn.__name__, kwargs.keys()))
    172 
--> 173     return fn(**kwargs_to_pass)
    174 
    175   return wrapped_fn

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_model_analysis/eval_saved_model/export.pyc in export_eval_savedmodel(estimator, export_dir_base, eval_input_receiver_fn, serving_input_receiver_fn, assets_extra, checkpoint_path)
    472       },
    473       assets_extra=assets_extra,
--> 474       checkpoint_path=checkpoint_path)
    475 
    476 

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow/python/util/deprecation.pyc in new_func(*args, **kwargs)
    322               'in a future version' if date is None else ('after %s' % date),
    323               instructions)
--> 324       return func(*args, **kwargs)
    325     return tf_decorator.make_decorator(
    326         func, new_func, 'deprecated',

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/contrib/estimator/python/estimator/export.pyc in export_all_saved_models(estimator, export_dir_base, input_receiver_fn_map, assets_extra, as_text, checkpoint_path)
    206       assets_extra=assets_extra,
    207       as_text=as_text,
--> 208       checkpoint_path=checkpoint_path)

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.pyc in experimental_export_all_saved_models(self, export_dir_base, input_receiver_fn_map, assets_extra, as_text, checkpoint_path)
    820         self._add_meta_graph_for_mode(
    821             builder, input_receiver_fn_map, checkpoint_path,
--> 822             save_variables, mode=model_fn_lib.ModeKeys.EVAL)
    823         save_variables = False
    824       if input_receiver_fn_map.get(model_fn_lib.ModeKeys.PREDICT):

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.pyc in _add_meta_graph_for_mode(self, builder, input_receiver_fn_map, checkpoint_path, save_variables, mode, export_tags, check_variables)
    895           labels=getattr(input_receiver, 'labels', None),
    896           mode=mode,
--> 897           config=self.config)
    898 
    899       export_outputs = model_fn_lib.export_outputs_for_mode(

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.pyc in _call_model_fn(self, features, labels, mode, config)
   1110 
   1111     logging.info('Calling model_fn.')
-> 1112     model_fn_results = self._model_fn(features=features, **kwargs)
   1113     logging.info('Done calling model_fn.')
   1114 

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in model_fn(features, labels, mode)
    276 
    277     model = _clone_and_build_model(mode, keras_model, custom_objects, features,
--> 278                                    labels)
    279     model_output_names = []
    280     # We need to make sure that the output names of the last layer in the model

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in _clone_and_build_model(mode, keras_model, custom_objects, features, labels)
    184   K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)
    185   input_tensors, target_tensors = _convert_estimator_io_to_keras(
--> 186       keras_model, features, labels)
    187 
    188   compile_clone = (mode != model_fn_lib.ModeKeys.PREDICT)

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in _convert_estimator_io_to_keras(keras_model, features, labels)
    157 
    158   input_tensors = _to_ordered_tensor_list(
--> 159       features, input_names, 'features', 'inputs')
    160   target_tensors = _to_ordered_tensor_list(
    161       labels, output_names, 'labels', 'outputs')

/usr/local/envs/py2env/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/keras.pyc in _to_ordered_tensor_list(obj, key_order, obj_name, order_name)
    139                 order_name=order_name, order_keys=set(key_order),
    140                 obj_name=obj_name, obj_keys=set(obj.keys()),
--> 141                 different_keys=different_keys))
    142 
    143       return [_convert_tensor(obj[key]) for key in key_order]

KeyError: "The dictionary passed into features does not have the expected inputs keys defined in the keras model.\n\tExpected keys: set([u'vgg16_input'])\n\tfeatures keys: set(['image', 'label'])\n\tDifference: set(['image', 'label', u'vgg16_input'])"

My questions are:

  1. Can the features be extracted using the tfdv (tensorflow-data-validation) schema utilities?

  2. Can the eval_input_receiver_1_fn method be replaced with one that uses the Dataset API:

def eval_input_receiver_fn():
    validation_dataset = get_batched_dataset(validation_filenames)
    return validation_dataset

Any help / references are appreciated. Thanks, eilalan

Tags: tensorflow-datasets, tensorflow-estimator, tf.keras, tensorflow-model-analysis, tensorflow-data-validation

Solution


Keras works a bit differently from estimators (even when using model_to_estimator). There are a few things to note:

1) Keras requires that the input feature names match the input layer names.

It appears that you did not define an InputLayer in your Keras model, so Keras created a default one named after your first layer (e.g. vgg16 -> vgg16_input). Your incoming features use the name 'image'. You can either create an input layer with the name 'image' or rename the parsed feature key to 'vgg16_input', as sketched below.
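For the first option, here is a minimal sketch of rebuilding the classifier with an explicitly named input layer. The input shape, weights, and output activation are assumptions for illustration, not taken from your saved model:

import tensorflow as tf
from tensorflow import keras

# Explicit input layer whose name matches the parsed feature key 'image'.
inputs = keras.layers.Input(shape=(192, 192, 3), name='image')

# Pre-trained VGG16 base without its classification head (assumed setup).
vgg16 = keras.applications.VGG16(include_top=False, weights='imagenet',
                                 input_shape=(192, 192, 3))
x = vgg16(inputs)
x = keras.layers.Flatten()(x)
outputs = keras.layers.Dense(2, activation='softmax')(x)

new_model = keras.Model(inputs=inputs, outputs=outputs)
# model_to_estimator will now expect a feature named 'image' rather than
# the auto-generated 'vgg16_input'.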

2) Unlike estimators, Keras requires that you pass only the features used by the model.

You are passing both 'label' and 'image' as features; you need to pop the label from the features dict, for example as in the sketch below.
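Putting both points together, here is a sketch of a revised receiver function for the second option, keeping the auto-generated 'vgg16_input' name. It reuses your feature columns; whether the string label still needs to be mapped to a numeric tensor depends on how the model was compiled:

import tensorflow as tf
import tensorflow_model_analysis as tfma

def eval_input_receiver_fn():
    # Placeholder for the serialized tf.Example protos fed in at eval time
    serialized_tf_example = tf.compat.v1.placeholder(
        dtype=tf.string, shape=[None], name='input_example_placeholder')
    receiver_tensors = {'examples': serialized_tf_example}

    # Same parse spec as in the question
    validation_features_columns = [
        tf.feature_column.numeric_column("image", shape=(192, 192)),
        tf.feature_column.categorical_column_with_vocabulary_list(
            "label", ["normal_healthy", "sick"])]
    feature_spec = tf.feature_column.make_parse_example_spec(validation_features_columns)
    parsed = tf.io.parse_example(serialized_tf_example, feature_spec)

    # Keep the label out of the features dict (point 2) and expose the image
    # under the keras model's expected input key (point 1).
    labels = parsed.pop('label')
    features = {'vgg16_input': parsed['image']}

    return tfma.export.EvalInputReceiver(
        features=features,
        receiver_tensors=receiver_tensors,
        labels=labels)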

All that said, TFMA does not yet have full support for TF 2.0. You might have better luck running from head rather than the alpha release, but that support is still under development.

