首页 > 解决方案 > TensorFlow:SparseSoftmaxCrossEntropyWithLogits 错误?

问题描述

我目前正在尝试按照 TensorFlow 的入门指南操作,但遇到了障碍。我没有找到任何关于我所遇到的这个错误的相关资料,而且我确信我的代码与官网上的示例相差不大。

代码:

import tensorflow as tf;
import tensorflow.contrib.eager as tfe;

# Switch TF 1.x into eager mode so ops execute immediately
# (must be called once, before any other TensorFlow op).
tf.enable_eager_execution();

# Download the Iris training/test CSVs into the local Keras cache;
# get_file() returns the cached local file path.
# NOTE(review): this performs network I/O at import time.
iris_dataset_url = 'http://download.tensorflow.org/data/iris_training.csv';
iris_csv_file = tf.keras.utils.get_file('iris_dataset.csv', iris_dataset_url);

iris_dataset_tests_url = 'http://download.tensorflow.org/data/iris_test.csv';
iris_tests_csv_file = tf.keras.utils.get_file('iris_tests_dataset.csv', iris_dataset_tests_url);

def iris_data_parse_line(line):
    """Parse one CSV text line into a (features, label) tensor pair.

    NOTE(review): every record default below is a float literal, so
    tf.decode_csv parses all five columns -- including the label -- as
    float32; sparse_softmax_cross_entropy later expects integer labels.
    """
    default_feature = [[0.0], [0.0], [0.0], [0.0], [0.0]];
    parsed_line = tf.decode_csv(line, default_feature);

    # First four columns are the features; the last column is the class label.
    features = tf.reshape(parsed_line[:-1], shape=(4,), name="features");
    label = tf.reshape(parsed_line[-1], shape=(), name="label");

    return features, label;

def prediction_loss_diff(features, label, model):
    """Run the model on `features` and return the sparse softmax
    cross-entropy loss against `label` (expects integer class ids)."""
    predicted_label = model(features);  # raw 3-class logits from the final Dense layer
    return tf.losses.sparse_softmax_cross_entropy(label, predicted_label);

def gradient_tune(features, targets, model):
    """Compute the loss under a gradient tape and return the gradients
    of that loss with respect to the model's variables."""
    with tf.GradientTape() as tape:
        prediction_loss = prediction_loss_diff(features, targets, model);
    return tape.gradient(prediction_loss, model.variables);

def train_model(training_dataset, model, optimizer):
    """Train `model` on `training_dataset` for 201 rounds of SGD."""
    train_loss_results = []      # NOTE(review): never appended to or returned
    train_accuracy_results = []  # NOTE(review): never appended to or returned
    rounds = 201;


    for round_num in range(rounds):
        # NOTE(review): these metrics are created each round but never
        # updated or read -- dead code as written.
        epoch_loss_avg = tfe.metrics.Mean();
        epoch_accuracy = tfe.metrics.Accuracy();

        # Apply one gradient-descent step per batch.
        for features, label in training_dataset:
            gradients = gradient_tune(features, label, model);
            optimizer.apply_gradients(
                    zip(gradients, model.variables),
                    global_step=tf.train.get_or_create_global_step());



def main():
    """Build the Iris input pipeline, model and optimizer, then train."""
    print("TensorFlow version: {}".format(tf.VERSION));
    print("Eager execution: {}".format(tf.executing_eagerly()));

    # skip(1) drops the CSV header row; each remaining line is parsed,
    # shuffled (buffer of 1000) and batched in groups of 32.
    iris_dataset = (tf.data.TextLineDataset(iris_csv_file)
                           .skip(1)
                           .map(iris_data_parse_line)
                           .shuffle(1000)
                           .batch(32));

    # 4 input features -> two 10-unit ReLU layers -> 3-class logits
    # (no softmax here; the loss function applies it internally).
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation="relu", input_shape=(4,)),
        tf.keras.layers.Dense(10, activation="relu"),
        tf.keras.layers.Dense(3)
    ]);

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01);

    train_model(iris_dataset, model, optimizer);

if __name__ == "__main__":
    main();

该错误似乎发生在 prediction_loss_diff 函数中,该函数本应正常调用 sparse_softmax_cross_entropy 损失函数,但我不断收到如下错误消息:

Traceback (most recent call last):
  File "main.py", line 69, in <module>
    main();
  File "main.py", line 66, in main
    train_model(iris_dataset, model, optimizer);
  File "main.py", line 41, in train_model
    gradients = gradient_tune(features, label, model);
  File "main.py", line 27, in gradient_tune
    prediction_loss = prediction_loss_diff(features, targets, model);
  File "main.py", line 23, in prediction_loss_diff
    return tf.losses.sparse_softmax_cross_entropy(label, predicted_label);
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/ops/losses/losses_impl.py", line 853, in sparse_softmax_cross_entropy
    name="xentropy")
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 2050, in sparse_softmax_cross_entropy_with_logits
    precise_logits, labels, name=name)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 7504, in sparse_softmax_cross_entropy_with_logits
    _six.raise_from(_core._status_to_exception(e.code, message), None)
  File "<string>", line 2, in raise_from
tensorflow.python.framework.errors_impl.InternalError: Could not find valid device for node name: "SparseSoftmaxCrossEntropyWithLogits"
op: "SparseSoftmaxCrossEntropyWithLogits"
input: "dummy_input"
input: "dummy_input"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
attr {
  key: "Tlabels"
  value {
    type: DT_FLOAT
  }
}

我不确定“它没有找到有效的节点设备”是什么意思,但我认为它可能与较低 C 包装中的某些东西有关?我的输入不好吗?任何帮助都会很棒,谢谢。

标签: python tensorflow machine-learning

解决方案


这是一个输入问题。奇怪的是,错误消息并没有直接指出到底是什么类型冲突。但是,对照入门指南检查我的代码差异时,我注意到我的 default_feature 变量全部用浮点数进行了初始化,而标签列需要是整数才能正确标记数据。sparse_softmax_cross_entropy 函数要求它的 labels 参数是 int32/int64 类型的类别编号,不能是浮点数;传入浮点标签会使底层 C 绑定找不到匹配该类型组合的内核,从而导致上述异常。更多信息请参阅 sparse_softmax_cross_entropy 损失函数的文档。

所以,而不是[[0.0], [0.0], [0.0], [0.0], [0.0]];它应该是[[0.0], [0.0], [0.0], [0.0], [0]];

最终代码:

import tensorflow as tf;
import tensorflow.contrib.eager as tfe;

# Switch TF 1.x into eager mode so ops execute immediately
# (must be called once, before any other TensorFlow op).
tf.enable_eager_execution();

# Download the Iris training/test CSVs into the local Keras cache;
# get_file() returns the cached local file path.
# NOTE(review): this performs network I/O at import time.
iris_dataset_url = 'http://download.tensorflow.org/data/iris_training.csv';
iris_csv_file = tf.keras.utils.get_file('iris_dataset.csv', iris_dataset_url);

iris_dataset_tests_url = 'http://download.tensorflow.org/data/iris_test.csv';
iris_tests_csv_file = tf.keras.utils.get_file('iris_tests_dataset.csv', iris_dataset_tests_url);

def iris_data_parse_line(line):
    """Parse one Iris CSV line into a (features, label) tensor pair.

    The first four columns are float features; the last column's default
    is the *integer* 0, so tf.decode_csv yields an int32 label -- the
    dtype sparse_softmax_cross_entropy requires.
    """
    record_defaults = [[0.0], [0.0], [0.0], [0.0], [0]]
    columns = tf.decode_csv(line, record_defaults)

    # Split the parsed columns: 4-vector of features, scalar class id.
    features = tf.reshape(columns[:-1], shape=(4,), name="features")
    label = tf.reshape(columns[-1], shape=(), name="label")
    return features, label

def prediction_loss_diff(features, label, model):
    """Forward the features through `model` and return the sparse
    softmax cross-entropy loss against the integer `label`."""
    logits = model(features)  # raw 3-class scores from the final Dense layer
    return tf.losses.sparse_softmax_cross_entropy(label, logits)

def gradient_tune(features, targets, model):
    """Record the loss computation on a tape and return d(loss)/d(vars)
    for every trainable variable of `model`."""
    with tf.GradientTape() as tape:
        loss_value = prediction_loss_diff(features, targets, model)
    return tape.gradient(loss_value, model.variables)

def train_model(training_dataset, model, optimizer):
    """Train `model` on `training_dataset` with 201 rounds of SGD.

    Args:
        training_dataset: tf.data dataset yielding (features, label) batches.
        model: the Keras model producing class logits.
        optimizer: a tf.train optimizer used to apply the gradients.

    Removed dead code from the original: two result lists and per-round
    tfe.metrics objects were created but never updated, read or returned.
    """
    rounds = 201

    for round_num in range(rounds):
        # One full pass over the (shuffled, batched) dataset per round.
        for features, label in training_dataset:
            gradients = gradient_tune(features, label, model)
            optimizer.apply_gradients(
                    zip(gradients, model.variables),
                    global_step=tf.train.get_or_create_global_step())



def main():
    """Build the Iris input pipeline, model and optimizer, then train."""
    print("TensorFlow version: {}".format(tf.VERSION))
    print("Eager execution: {}".format(tf.executing_eagerly()))

    # skip(1) drops the CSV header; remaining lines are parsed, shuffled
    # (buffer of 1000) and grouped into batches of 32.
    iris_dataset = (
        tf.data.TextLineDataset(iris_csv_file)
        .skip(1)
        .map(iris_data_parse_line)
        .shuffle(1000)
        .batch(32)
    )

    # 4 input features -> two 10-unit ReLU layers -> 3-class logits
    # (no softmax here; the loss applies it internally).
    hidden_and_output = [
        tf.keras.layers.Dense(10, activation="relu", input_shape=(4,)),
        tf.keras.layers.Dense(10, activation="relu"),
        tf.keras.layers.Dense(3),
    ]
    model = tf.keras.Sequential(hidden_and_output)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_model(iris_dataset, model, optimizer)

if __name__ == "__main__":
    main()

推荐阅读