Why does training and testing on the same dataset give different accuracies?

Problem Description

I have been looking at the loss on the training and validation datasets, and I keep seeing the validation loss come out smaller than the training loss, even though they are the same dataset. I am trying to understand why this happens.

I am training a model in tensorflow to predict some time series data. The model creation and preprocessing look like this:

# imports used by the snippets below (k refers to tensorflow.keras; numpy, datetime, and os are used further down)
import os
import datetime

import numpy as np
import tensorflow as tf
from tensorflow import keras as k

window_size = 40
batch_size  = 32
forecast_period = 6
model_name = "LSTM"
tf.keras.backend.clear_session()

_seed = 42
tf.random.set_seed(_seed)

def _sub_to_batch(sub):
    return sub.batch(window_size, drop_remainder=True)

def _return_input_output(tensor):
    _input  = tensor[:, :-forecast_period, :]
    _output = tensor[:, forecast_period:, :]
    return _input, _output

def _reshape_tensor(tensor):
    tensor = tf.expand_dims(tensor, axis=-1)
    tensor = tf.transpose(tensor, [1, 0, 2])
    return tensor


# total elements after unbatch(): 3813
train_ts_dataset = tf.data.Dataset.from_tensor_slices(train_ts)\
                            .window(window_size, shift=1)\
                            .flat_map(_sub_to_batch)\
                            .map(_reshape_tensor)\
                            .map(_return_input_output)
#                             .unbatch().shuffle(buffer_size=500, seed=_seed).batch(batch_size)\
#                             .map(_return_input_output)

valid_ts_dataset = tf.data.Dataset.from_tensor_slices(valid_ts)\
                            .window(window_size, shift=1)\
                            .flat_map(_sub_to_batch)\
                            .map(_reshape_tensor)\
                            .unbatch().shuffle(buffer_size=500, seed=_seed).batch(batch_size)\
                            .map(_return_input_output)

def _forecast_mae(y_pred, y_true):
    _y_pred = y_pred[:, -forecast_period:, :]
    _y_true = y_true[:, -forecast_period:, :]
    mae = tf.losses.MAE(_y_true, _y_pred)
    return mae

def _accuracy(y_pred, y_true):
    # print(y_true) => Tensor("sequential/time_distributed/Reshape_1:0", shape=(None, 34, 1), dtype=float32)
    # y_true[-forecast_period:, :]  =>   Tensor("strided_slice_4:0", shape=(None, 34, 1), dtype=float32)
    # y_true[:, -forecast_period:, :] => Tensor("strided_slice_4:0", shape=(None, 6, 1), dtype=float32)

    _y_pred = y_pred[:, -forecast_period:, :]
    _y_pred = tf.reshape(_y_pred, shape=[-1, forecast_period])
    _y_true = y_true[:, -forecast_period:, :]
    _y_true = tf.reshape(_y_true, shape=[-1, forecast_period])

    # MAPE: Tensor("Mean_1:0", shape=(None, 1), dtype=float32)
    MAPE = tf.math.reduce_mean(tf.math.abs((_y_pred - _y_true) / _y_true), axis=1, keepdims=True)

    accuracy = 1 - MAPE
    accuracy = tf.where(accuracy < 0, tf.zeros_like(accuracy), accuracy)
    accuracy = tf.reduce_mean(accuracy)
    return accuracy

model = k.models.Sequential([
    k.layers.Bidirectional(k.layers.LSTM(units=100, return_sequences=True), input_shape=(None, 1)),
    k.layers.Bidirectional(k.layers.LSTM(units=100, return_sequences=True)),
    k.layers.TimeDistributed(k.layers.Dense(1))
])

model_name = []
model_name_symbols = {"bidirectional": "BILSTM_1", "bidirectional_1": "BILSTM_2", "time_distributed": "td"}
for l in model.layers:
    model_name.append(model_name_symbols.get(l.name, l.name))

model_name = "_".join(model_name)
print(model_name)

for i, (x, y) in enumerate(train_ts_dataset):
    print(i, x.numpy().shape, y.numpy().shape)

The printed dataset shapes are as follows:

BILSTM_1_BILSTM_2_td
0 (123, 34, 1) (123, 34, 1)
1 (123, 34, 1) (123, 34, 1)
2 (123, 34, 1) (123, 34, 1)
3 (123, 34, 1) (123, 34, 1)
4 (123, 34, 1) (123, 34, 1)
5 (123, 34, 1) (123, 34, 1)
6 (123, 34, 1) (123, 34, 1)
7 (123, 34, 1) (123, 34, 1)
8 (123, 34, 1) (123, 34, 1)

Then:

_datetime = datetime.datetime.now().strftime("%Y%m%d-%H-%M-%S")
_log_dir = os.path.join(".", "logs", "fit7", model_name, _datetime)

tensorboard_cb = k.callbacks.TensorBoard(log_dir=_log_dir)

model.compile(loss="mae", optimizer=tf.optimizers.Adam(learning_rate=0.001), metrics=[_forecast_mae, _accuracy])

history = model.fit(train_ts_dataset, epochs=100, validation_data=train_ts_dataset, callbacks=[tensorboard_cb])

I have been looking at the loss on the training and validation datasets, and I keep seeing the validation loss come out smaller than the training loss. I may be doing something wrong. So, as a simple test, I replaced the validation set with the training set itself in order to monitor loss and accuracy on the same data during training and testing. But I still get a validation accuracy that is higher than the training accuracy. Here is the accuracy for the training and validation datasets:

[Image: training vs. validation accuracy curves]

This seems strange to me: even though I use the same dataset for training and testing, the validation accuracy I get is higher than the training accuracy. And there is no dropout, no BatchNormalization layer, etc.

Any hints as to what might be causing this behaviour would be greatly appreciated!

======================================================================

Here are some modifications to the code to check whether the batch size has any effect. In addition, to rule out any doubts about tf.data.Dataset, I use numpy arrays as input. The new code is therefore:

custom_train_ts   = train_ts.transpose(1, 0)[..., np.newaxis]
custom_train_ts_x = custom_train_ts[:, :window_size, :] # size: 123, window_size, 1
custom_train_ts_y = custom_train_ts[:, -window_size:, :] # size: 123, window_size, 1

custom_valid_ts   = valid_ts.transpose(1, 0)[..., np.newaxis]
custom_valid_ts_x = custom_valid_ts[:, :window_size, :]
custom_valid_ts_y = custom_valid_ts[:, -window_size:, :]
custom_valid_ts   = (custom_valid_ts_x, custom_valid_ts_y)

Secondly, to make sure the accuracy is computed over the whole dataset and does not depend on the batch size, I feed the dataset in as-is, without batching. In addition, I implemented a custom metric as follows:

def _accuracy(y_true, y_pred):
    # print(y_true) => Tensor("sequential/time_distributed/Reshape_1:0", shape=(None, 34, 1), dtype=float32)
    # y_true[-forecast_period:, :]  =>   Tensor("strided_slice_4:0", shape=(None, 34, 1), dtype=float32)
    # y_true[:, -forecast_period:, :] => Tensor("strided_slice_4:0", shape=(None, 6, 1), dtype=float32)

    _y_pred = y_pred[:, -forecast_period:, :]
    _y_pred = tf.reshape(_y_pred, shape=[-1, forecast_period])
    _y_true = y_true[:, -forecast_period:, :]
    _y_true = tf.reshape(_y_true, shape=[-1, forecast_period])

    # MAPE: Tensor("Mean_1:0", shape=(None, 1), dtype=float32)
    MAPE = tf.math.reduce_mean(tf.math.abs((_y_pred - _y_true) / _y_true), axis=1, keepdims=True)

    accuracy = 1 - MAPE
    accuracy = tf.where(accuracy < 0, tf.zeros_like(accuracy), accuracy)        
    accuracy = tf.reduce_mean(accuracy)
    return accuracy


class MyAccuracy(tf.keras.metrics.Metric):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.accuracy_function = _accuracy
        self.y_true_lst = []
        self.y_pred_lst = []

    def update_state(self, y_true, y_pred, sample_weight=None):
        self.y_true_lst.append(y_true)
        self.y_pred_lst.append(y_pred)

    def result(self):
        y_true_concat = tf.concat(self.y_true_lst, axis=0)
        y_pred_concat = tf.concat(self.y_pred_lst, axis=0)
        accuracy = self.accuracy_function(y_true_concat, y_pred_concat)
        self.y_true_lst = []
        self.y_pred_lst = []
        return accuracy
    def get_config(self):
        base_config = super().get_config()
        return {**base_config}

Finally, the model is compiled and fitted as:

model.compile(loss="mae", optimizer=tf.optimizers.Adam(hparams["learning_rate"]), 
              metrics=[tf.metrics.MAE, MyAccuracy()])

history = model.fit(custom_train_ts_x, custom_train_ts_y, epochs=120, batch_size=123, validation_data=custom_valid_ts, 
                    callbacks=[tensorboard_cb])

When I look at the training and validation accuracy in tensorboard, I get the following:

[Image: TensorBoard training vs. validation accuracy]

Clearly, this makes no sense. Moreover, in this case I made sure the accuracy is only computed when result() is called. However, when looking at the validation loss, I find that the training loss is lower than the validation loss:

[Image: TensorBoard training vs. validation loss]

Tags: python, tensorflow

Solution


They are different because the optimizer updates the parameters at the end of each batch; val_loss is computed at the end (after those updates), whereas train_loss is computed during the process.

Even if you have only one sample per batch and only one batch per epoch, they will still differ from each other: the network does a forward pass on your sample and computes the loss, which is reported as train_loss, and after updating the parameters it computes the loss again, this time reported as val_loss (in that case, your next epoch's train_loss will equal your current val_loss).
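
As a minimal illustration of this bookkeeping (a toy sketch, not the model from the question; the small Dense model, random data, and names below are made up for the example), you can compute the loss by hand before and after a single optimizer step with a GradientTape; the value before the update is what Keras reports as loss, and re-evaluating after the update corresponds to what val_loss sees:

import numpy as np
import tensorflow as tf

tf.random.set_seed(0)
x = np.random.rand(8, 4).astype("float32")   # toy inputs
y = np.random.rand(8, 1).astype("float32")   # toy targets

toy_model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
loss_fn = tf.keras.losses.MeanAbsoluteError()

for step in range(3):
    with tf.GradientTape() as tape:
        loss_before = loss_fn(y, toy_model(x, training=True))   # what "loss" reports for this batch
    grads = tape.gradient(loss_before, toy_model.trainable_variables)
    optimizer.apply_gradients(zip(grads, toy_model.trainable_variables))

    loss_after = loss_fn(y, toy_model(x, training=False))       # what "val_loss" would see at epoch end
    print(f"step {step}: before update = {float(loss_before):.4f}, after update = {float(loss_after):.4f}")
    # With a single batch per epoch, the next step's "before" value equals this step's "after" value.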

So if you want to check whether what I just said is true, you can simply set the learning_rate of your optimizer to 0; then you will get the same loss.

Here is the code I tested on MNIST for the same issue (for now, you can check the code and the results on my colab from here):

# ---------------------------------
# Importing stuff
import numpy as np
import tensorflow as tf
from tensorflow import keras

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *

from tensorflow.keras.utils import to_categorical

# ---------------------------------
(trainX, trainy), (testX, testy) = keras.datasets.mnist.load_data()

# one-hot
trainy = to_categorical(trainy, 10)
testy = to_categorical(testy, 10)

# image should be in shape of (28, 28, 1) not (28, 28)
trainX = np.expand_dims(trainX, -1)
testX = np.expand_dims(testX, -1)

# normalize
trainX = trainX/255.
testX = testX/255.

# ---------------------------------
# Build the model
model = Sequential()
model.add(Input(trainX.shape[1:]))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

model.summary()

Compile and fit it for several scenarios:

# training on 1 sample, but with learning_rate != 0
opt = keras.optimizers.Adam(learning_rate = 0.001)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['categorical_accuracy'])

batchX = trainX[0].reshape(1, 28, 28, 1)
batchy = trainy[0].reshape(1, 10)

model.fit(batchX, batchy, validation_data = (batchX, batchy), batch_size = 1, 
          shuffle = False, validation_batch_size = 1, epochs = 5)

# You will see that the loss and val_loss are different and that the
# next step's loss is equal to the current step's val_loss.

# training on 1 sample, with learning_rate == 0
opt = keras.optimizers.Adam(learning_rate = 0)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['categorical_accuracy'])

batchX = trainX[0].reshape(1, 28, 28, 1)
batchy = trainy[0].reshape(1, 10)

model.fit(batchX, batchy, validation_data = (batchX, batchy), batch_size = 1, 
          shuffle = False, validation_batch_size = 1, epochs = 5)

# You will see that the loss and val_loss are equal because
# the parameters will not change.

# training on the complete dataset, but with learning_rate != 0
opt = keras.optimizers.Adam(learning_rate = 0.001)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['categorical_accuracy'])

model.fit(trainX, trainy, validation_data = (trainX, trainy), batch_size = 32, 
          shuffle = False, validation_batch_size = 32, epochs = 5)

# this is similar to the case you asked about

# training on the complete dataset, with learning_rate == 0
opt = keras.optimizers.Adam(learning_rate = 0)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['categorical_accuracy'])

model.fit(trainX, trainy, validation_data = (trainX, trainy), batch_size = 32, 
          shuffle = False, validation_batch_size = 32, epochs = 5)

# set the learning_rate to zero and again you'll get loss == val_loss
