TensorFlow: TensorBoard Visualization


TensorBoard

TensorBoard is a visualization tool released to make TensorFlow programs easier to understand, debug, and optimize. You can use TensorBoard to display your TensorFlow graph, plot quantitative metrics about its execution, and show additional data.

  • TensorBoard runs by reading TensorFlow event files
  • Event files contain the main data generated during a run (a minimal sketch follows)
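As a minimal sketch of that mechanism (the directory name logs/demo and the tag demo_metric are arbitrary examples), writing a single scalar is enough to produce an event file that TensorBoard can read:

import tensorflow as tf

# Writing one scalar creates an event file under logs/demo
writer = tf.summary.create_file_writer('logs/demo')
with writer.as_default():
    tf.summary.scalar('demo_metric', 0.5, step=0)
writer.flush()  # ensure the event is flushed to disk

Running tensorboard --logdir logs afterwards picks up the file and plots the value.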

Main Topics

  1. Using TensorBoard through tf.keras callbacks
  2. Getting to know the TensorBoard interface
  3. Logging custom variables from tf.keras callbacks
  4. Using TensorBoard in a custom training loop

How to Display

  1. In a notebook
# Load the TensorBoard extension in the notebook
%load_ext tensorboard
%matplotlib inline

# Launch TensorBoard; the argument is the log directory, here logs
%tensorboard --logdir logs
  2. In a browser
    (1) Open a cmd prompt and change to the directory containing your code
    (2) Run the command tensorboard --logdir logs
    (3) Copy the printed URL into a browser (an example session follows)
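An illustrative session (the project path is a placeholder; TensorBoard serves on port 6006 by default):

cd C:\path\to\your\project
tensorboard --logdir logs
# TensorBoard prints a URL such as http://localhost:6006/ to open in the browser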

Logging Custom Scalar Values

Retrain the model while logging a custom learning rate.

  1. Create a file writer with tf.summary.create_file_writer()
  2. Define a custom learning-rate function and pass it to the Keras LearningRateScheduler callback
  3. Inside the learning-rate function, use tf.summary.scalar() to log the custom learning rate
  4. Pass the LearningRateScheduler callback to Model.fit()

Example:

import tensorflow as tf
import datetime
(train_images,train_labels),(test_images,test_labels) = tf.keras.datasets.mnist.load_data()
# Add a channel dimension: (60000,28,28) -> (60000,28,28,1)
train_images = tf.expand_dims(train_images,-1)
test_images = tf.expand_dims(test_images,-1)
# Scale pixel values to [0,1] and cast labels to int64
train_images = tf.cast(train_images/255,tf.float32)
test_images = tf.cast(test_images/255,tf.float32)
train_labels = tf.cast(train_labels,tf.int64)
test_labels = tf.cast(test_labels,tf.int64)
dataset = tf.data.Dataset.from_tensor_slices((train_images,train_labels))
test_dataset = tf.data.Dataset.from_tensor_slices((test_images,test_labels))

# repeat() with no count yields an infinite dataset: that suits fit() together with
# steps_per_epoch, while the custom loop below takes a fixed number of batches per epoch
dataset = dataset.repeat().shuffle(60000).batch(128)
test_dataset = test_dataset.repeat().batch(128)

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16,[3,3],activation='relu',input_shape=(None,None,1)),
    tf.keras.layers.Conv2D(32,[3,3],activation='relu'),
    tf.keras.layers.GlobalMaxPooling2D(),
    tf.keras.layers.Dense(10,activation='softmax')
])

model.compile(optimizer='adam',
             loss='sparse_categorical_crossentropy',
             metrics=['accuracy'])

# Append a timestamp to the log directory name
import os
log_dir = os.path.join('logs',datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

# First argument: directory where the event files are written
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir,histogram_freq=1)

# Create a file writer for the custom scalar
file_writer = tf.summary.create_file_writer(log_dir + '/lr')

# Make it the default writer, so tf.summary calls go to it
file_writer.set_as_default()

# Schedule function: lower the learning rate as training progresses
def lr_sche(epoch):
    learning_rate = 0.2
    if epoch > 5:
        learning_rate = 0.02
    if epoch > 10:
        learning_rate = 0.01
    if epoch > 20:
        learning_rate = 0.005
    # Log the current learning rate to disk through the default file writer
    tf.summary.scalar('learning_rate',data=learning_rate,step=epoch)
    return learning_rate

# Callback that applies the learning-rate schedule
lr_callback = tf.keras.callbacks.LearningRateScheduler(lr_sche)

model.fit(dataset,epochs=25,
          steps_per_epoch=60000//128,
          validation_data=test_dataset,
          validation_steps=10000//128,
          callbacks=[tensorboard_callback,lr_callback]
         )
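Once fit() has run, the custom learning_rate curve appears next to loss and accuracy under the Scalars tab; from a notebook it can be opened with the magic shown earlier:

%tensorboard --logdir logs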

Using TensorBoard in a Custom Training Loop

optimizer = tf.keras.optimizers.Adam()
loss_func = tf.keras.losses.SparseCategoricalCrossentropy()

# Helper that computes the per-batch loss (train_step below computes it inline)
def loss(model,x,y):
    y_ = model(x)
    return loss_func(y,y_)
    
# Initialize the metric objects used to accumulate loss and accuracy
train_loss = tf.keras.metrics.Mean('train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy') 
test_loss = tf.keras.metrics.Mean('test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy') 
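# Illustrative aside: Keras metric objects accumulate state across calls, e.g.
#   m = tf.keras.metrics.Mean(); m(1.0); m(3.0)  ->  m.result() == 2.0
#   m.reset_states()                              ->  start a fresh average
# This is why all four metrics are reset at the end of every epoch in train() below.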

def train_step(model,images,labels):
    with tf.GradientTape() as t:
        pred = model(images)
        loss_step = loss_func(labels,pred)
        #loss_step = loss(model,images,labels) # equivalent, using the helper above
    grads = t.gradient(loss_step,model.trainable_variables) # gradients of the loss w.r.t. the trainable variables
    optimizer.apply_gradients(zip(grads,model.trainable_variables)) # let the optimizer apply the gradients
    # accumulate the running mean loss
    train_loss(loss_step)
    # accumulate the running accuracy
    train_accuracy(labels,pred)

def test_step(model,images,labels):
    pred = model(images)
    loss_step = loss_func(labels,pred)
    # accumulate the running mean test loss
    test_loss(loss_step)
    # accumulate the running test accuracy
    test_accuracy(labels,pred)



current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
test_log_dir = 'logs/gradient_tape/' + current_time + '/test'

train_writer = tf.summary.create_file_writer(train_log_dir)
test_writer = tf.summary.create_file_writer(test_log_dir)

train_steps = 60000//128
test_steps = 10000//128

def train():
    for epoch in range(10):
        # dataset repeats indefinitely, so take a fixed number of batches per epoch
        for (batch,(images,labels)) in enumerate(dataset.take(train_steps)):
            train_step(model,images,labels)
        with train_writer.as_default():
            tf.summary.scalar('loss',train_loss.result(),step=epoch) # record the epoch's training scalars
            tf.summary.scalar('acc',train_accuracy.result(),step=epoch)

        print('Epoch{}  loss is {}, accuracy is {}'.format(epoch,train_loss.result(),train_accuracy.result()))
        for (batch,(images,labels)) in enumerate(test_dataset.take(test_steps)):
            test_step(model,images,labels)
            print('*',end='')

        with test_writer.as_default():
            tf.summary.scalar('loss',test_loss.result(),step=epoch) # record the epoch's test scalars
            tf.summary.scalar('acc',test_accuracy.result(),step=epoch)

        print('Epoch{}  test_loss is {}, test_accuracy is {}'.format(epoch,test_loss.result(),test_accuracy.result()))
        # reset the metrics so each epoch's averages start fresh
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()
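The listing above only defines the loop; to actually train and write the event files, call the function and then point TensorBoard at the gradient_tape directory:

train()

# In a notebook, the resulting train/test curves can then be compared with:
# %tensorboard --logdir logs/gradient_tape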

