TensorFlow: how to store variables?

Problem description

So I have just started experimenting with TensorFlow, but I feel I am having a hard time grasping the concepts. I am currently focusing on the MNIST dataset, but with only 8000 images for training and 2000 for testing. The small code snippet I currently have is:

from keras.layers import Input, Dense, initializers
from keras.models import Model
from Dataset import Dataset
import matplotlib.pyplot as plt
from keras import optimizers, losses
import tensorflow as tf
import keras.backend as K

#global variables
d = Dataset()
num_features = d.X_train.shape[1]
low_dim = 32

def autoencoder():
    w = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
    input = Input(shape=(num_features,))

    encoded = Dense(low_dim, activation='relu', kernel_initializer = w)(input)

    decoded = Dense(num_features, activation='sigmoid', kernel_initializer = w)(encoded)

    autoencoder = Model(input, decoded)
    adam = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
    autoencoder.compile(optimizer=adam, loss='binary_crossentropy')
    autoencoder.fit(d.X_train, d.X_train,
                    epochs=50,
                    batch_size=64,
                    shuffle=True,
                    )

    encoded_imgs = autoencoder.predict(d.X_test)
    decoded_imgs = autoencoder.predict(encoded_imgs)
    #sess = tf.InteractiveSession()
    #error = losses.mean_absolute_error(decoded_imgs[0], d.X_train[0])
    #print(error.eval())
    #print(decoded_imgs.shape)
    #sess.close()
    n = 20  # how many digits we will display
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # display original
        #sess = tf.InteractiveSession()
        error = losses.mean_absolute_error(decoded_imgs[n], d.X_test[n])
        #print(error.eval())
        #print(decoded_imgs.shape)
        #sess.close()
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(d.X_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    #print(error)
    plt.show()
    return error

What I want to do is store the error as a list that I can print or plot in a graph later, but how do I do that efficiently with TensorFlow/Keras? Thanks in advance.

Tags: python, tensorflow

Solution


You can use the CSVLogger callback to store the errors in a CSV file. Here is a code snippet for this task.

from keras.callbacks import CSVLogger

# define callbacks; path_csv_logger is the path of the CSV file to write to
callbacks = [CSVLogger(path_csv_logger, separator=';', append=True)]

# pass the callbacks to model.fit() or model.fit_generator()
model.fit_generator(
    train_batch, train_steps, epochs=10, callbacks=callbacks,
    validation_data=validation_batch, validation_steps=val_steps)
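Since the goal is to print or plot the errors later, here is a minimal sketch of reading that CSV back and plotting the loss per epoch. It assumes the log was written to a file named training_log.csv with ';' as the separator, matching the snippet above; CSVLogger writes one row per epoch containing the epoch number and the tracked metrics such as loss.

import pandas as pd
import matplotlib.pyplot as plt

# read the CSV written by CSVLogger; the separator must match the one
# passed to the callback (';' in the snippet above)
log = pd.read_csv('training_log.csv', sep=';')

# each row corresponds to one epoch, with columns such as 'epoch' and 'loss'
plt.plot(log['epoch'], log['loss'])
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()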

Edit: To store the errors in a list, you can use a custom callback like this (a usage sketch follows the snippet):

# source: https://keras.io/callbacks/
import keras

class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        # start with an empty list of losses at the beginning of training
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        # append the training loss of the batch that just finished
        self.losses.append(logs.get('loss'))
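A minimal usage sketch, reusing the autoencoder, d and plt from the question (the variable name history below is just illustrative): instantiate the callback, pass it to fit(), and afterwards history.losses is a plain Python list of per-batch losses that you can print or plot.

# instantiate the callback and pass it to fit()
history = LossHistory()
autoencoder.fit(d.X_train, d.X_train,
                epochs=50,
                batch_size=64,
                shuffle=True,
                callbacks=[history])

# history.losses now holds one loss value per training batch
print(history.losses)
plt.plot(history.losses)
plt.xlabel('batch')
plt.ylabel('training loss')
plt.show()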
