Error: "logits and labels must have the same shape ((?, 1) vs (?,))" when converting a Keras model to an Estimator

Problem description

I'm new to ML and experimenting with Keras models. I want to understand how a Keras model works with tf.Estimator, so I pieced this code together from various examples. It has three key parts: the first creates TFRecord files by reading a set of images from a folder, the second reads from the TFRecord files and feeds an input_fn, and the third is a Keras model built from VGG16 plus a simple fully connected head.

The first two parts of the code work fine, but I get this error -

'ValueError: logits and labels must have the same shape ((?, 1) vs (?,))' 

during training. As suggested in an answer to another similar question, I tried expand_dims and even reshaped my labels to (?, 1), but that gave me some other errors.
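
For reference, what I tried looked roughly like the sketch below (this is an illustration of the approach, not my exact code; my commented-out attempts are still visible in Cell3 further down):

    # inside parser(), after parsing the record (illustrative sketch only)
    label = tf.cast(parsed["label"], tf.int32)
    label = tf.expand_dims(label, axis=-1)   # scalar -> shape (1,)
    # or equivalently: label = tf.reshape(label, [1])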

I'm not sure what is wrong with this code or why it produces

'ValueError: logits and labels must have the same shape ((?, 1) vs (?,))'. 

Please help me identify the mistake and suggest a fix. Thanks in advance. I've copy-pasted the code from my notebook below.


**Cell1 contents**

from random import shuffle
import glob
import sys
import cv2
import numpy as np
#import skimage.io as io
import tensorflow as tf

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def load_image(addr):
    # read an image and resize to (224, 224)
    # cv2 load images as BGR, convert it to RGB
    img = cv2.imread(addr)
    if img is None:
        return None
    img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

def createDataRecord(out_filename, addrs, labels):
    # open the TFRecords file
    #writer = tf.python_io.TFRecordWriter(out_filename)
    options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
    writer = tf.python_io.TFRecordWriter(out_filename, options=options)

    for i in range(len(addrs)):
        # print how many images are saved every 1000 images
        if not i % 1000:
            print('Train data: {}/{}'.format(i, len(addrs)))
            sys.stdout.flush()
        # Load the image
        img = load_image(addrs[i])

        label = labels[i]

        if img is None:
            continue

        # Create a feature
        feature = {
            'image': _bytes_feature(img.tostring()),
            'label': _int64_feature(label)
        }
        # Create an example protocol buffer
        example = tf.train.Example(features=tf.train.Features(feature=feature))

        # Serialize to string and write on the file
        writer.write(example.SerializeToString())

    writer.close()
    sys.stdout.flush()

**Cell2 contents**

cat_dog_train_path = 'dataset-mix/*/*.jpg'
# read addresses and labels from the 'train' folder
addrs = glob.glob(cat_dog_train_path)
labels = [0 if 'A' in addr else 1 for addr in addrs]  # 0 = A, 1 = B

# to shuffle data
c = list(zip(addrs, labels))
shuffle(c)
addrs, labels = zip(*c)

#print("addrs: " + str(addrs))
#print("labels: " + str(labels))

# Divide the data into 60% train, 20% validation, and 20% test
train_addrs = addrs[0:int(0.6*len(addrs))]
train_labels = labels[0:int(0.6*len(labels))]
val_addrs = addrs[int(0.6*len(addrs)):int(0.8*len(addrs))]
val_labels = labels[int(0.6*len(addrs)):int(0.8*len(addrs))]
test_addrs = addrs[int(0.8*len(addrs)):]
test_labels = labels[int(0.8*len(labels)):]

print("train_addrs: " + str(train_addrs))
print("train_labels: " + str(train_labels))

createDataRecord('train-zipped.tfrecords', train_addrs, train_labels)
createDataRecord('val-zipped.tfrecords', val_addrs, val_labels)
createDataRecord('test-zipped.tfrecords', test_addrs, test_labels)

------
**second part**
**Cell3 contents**

import tensorflow as tf
import cv2
import sys
import numpy as np

sess = tf.Session()
sess.run(tf.global_variables_initializer())

def parser(record):
    keys_to_features = {
        "image": tf.FixedLenFeature([], tf.string),
        "label": tf.FixedLenFeature([], tf.int64)
    }

    parsed = tf.parse_single_example(record, keys_to_features)
    image = tf.decode_raw(parsed["image"], tf.uint8)
    image = tf.cast(image, tf.float32)
    image = tf.reshape(image, shape=[224, 224, 3])
    label = tf.cast(parsed["label"], tf.int32)

    #return {'image': image}, label
    print("image.shape: " + str(np.shape(image)))
    print("label.shape before: " + str(np.shape(label)) + "; label: " + str(label))

    #label = np.array(label)
    #print("label.shape after making np.array: " + str(np.shape(label)))
    #label = np.expand_dims(label, axis=1)
    #label = np.reshape(label, (-1, 1))
    print("labels1.shape after expand_dims" + str(np.shape(label)))

    return {'vgg16_input': image}, label


def input_fn(filenames=["train-zipped.tfrecords"]):

  #To create a TFRecordDataset to read the compressed files:
  dataset = tf.data.TFRecordDataset(filenames=filenames, compression_type='GZIP', num_parallel_reads=40)

  dataset = dataset.apply(
      tf.contrib.data.shuffle_and_repeat(1024, 1)
  )
  dataset = dataset.apply(
      tf.contrib.data.map_and_batch(parser, 32)
  )
  #dataset = dataset.map(parser, num_parallel_calls=12)
  #dataset = dataset.batch(batch_size=1000)

  #print("dataset: " + str(dataset.make_one_shot_iterator().get_next()))

  dataset = dataset.prefetch(buffer_size=2)

  return dataset


def train_input_fn():
    return input_fn(filenames=["train-zipped.tfrecords", "test-zipped.tfrecords"])

def val_input_fn():
    return input_fn(filenames=["val-zipped.tfrecords"])

dat_set = train_input_fn()
item1 = dat_set.make_one_shot_iterator().get_next()[1]
res = sess.run(item1)
print("dataset: " + str(np.shape(res)))

#train_dataset = train_input_fn("train-zipped.tfrecords", "test-zipped.tfrecords")
#val_dataset = val_input_fn("val-zipped.tfrecords")

#print("classes: {0};\n shapes: {1};\n types: {2}\n".format(train_dataset.output_classes, 
#            train_dataset.output_shapes, train_dataset.output_types))

-----------
**third part**
**Cell4 contents**

def make_keras_estimator(): 
    from tensorflow.python.keras.applications.vgg16 import VGG16
    from tensorflow.python.keras import models
    from tensorflow.python.keras import layers
    import os

    conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(224, 224, 3))
    conv_base.trainable = False

    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=tf.keras.optimizers.RMSprop(lr=2e-5),
                  metrics=['acc'])

    model_dir = os.path.join(os.getcwd(), "model-keras-mix")
    os.makedirs(model_dir, exist_ok=True)
    print("model_dir: ",model_dir)
    model.summary()

    print("input_names" + str(model.input_names ))

    model_est = tf.keras.estimator.model_to_estimator(keras_model=model,
                                                        model_dir=model_dir)
    return model_est

**Cell5 contents**
model_keras = make_keras_estimator()

train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=10)
eval_spec = tf.estimator.EvalSpec(input_fn=val_input_fn)

import time
start_time = time.time()
tf.estimator.train_and_evaluate(model_keras, train_spec, eval_spec)
print("--- %s seconds ---" % (time.time() - start_time))

Tags: python, tensorflow, keras, tensorflow-datasets

Solution


I think the error message indicates that the labels should have shape (1,) rather than just being scalars.

'ValueError: logits and labels must have the same shape ((?, 1) vs (?,))'

My suggested fix is to add a "1" in the feature spec for "label":

def parser(record):
    keys_to_features = {
        "image": tf.FixedLenFeature([], tf.string),
        "label": tf.FixedLenFeature([1], tf.int64)  # add the 1 in this line
    }
    ......

Also, since the model output (sigmoid) is float32, I think you should cast the label to float32 instead of int32.

    label = tf.cast(parsed["label"], tf.float32)
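
Putting both changes together, the parser from Cell3 would look roughly like this (a sketch in which only the two lines noted above change; the rest is unchanged from the question):

def parser(record):
    keys_to_features = {
        "image": tf.FixedLenFeature([], tf.string),
        "label": tf.FixedLenFeature([1], tf.int64)  # shape (1,) instead of a scalar
    }

    parsed = tf.parse_single_example(record, keys_to_features)
    image = tf.decode_raw(parsed["image"], tf.uint8)
    image = tf.cast(image, tf.float32)
    image = tf.reshape(image, shape=[224, 224, 3])
    label = tf.cast(parsed["label"], tf.float32)  # float32 to match the sigmoid output

    return {'vgg16_input': image}, label

After map_and_batch, the labels then arrive with shape (?, 1), which matches the (?, 1) logits produced by the final Dense(1, activation='sigmoid') layer.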
