PyTorch: loop through epochs, then output the final values of all epochs

Problem description

The code below computes MSE and MAE values, but I have an issue: the MAE and MSE values at the end of each epoch are not getting stored in store_MAE and store_MSE. It seems to use only the values from the last epoch. Any idea what I need to change in the code to save the values for every epoch? I hope this makes sense. Thanks for your help.

global_step = 0
best_test_error = 10000
MAE_for_all_epochs = []
MSE_for_all_epochs = []
for epoch in range(4):
    print("Epoch %d" % epoch)
    model.train()
    for images, paths in tqdm(loader_train):
        images = images.to(device)
        targets = torch.tensor([metadata['count'][os.path.split(path)[-1]] for path in paths]) # B
        targets = targets.float().to(device)

        # forward pass:
        output = model(images) # B x 1 x 9 x 9 (analogous to a heatmap)
        preds = output.sum(dim=[1,2,3]) # predicted cell counts (vector of length B)
        
        # backward pass:
        loss = torch.mean((preds - targets)**2)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # logging:
        count_error = torch.abs(preds - targets).mean()
        writer.add_scalar('train_loss', loss.item(), global_step=global_step)
        writer.add_scalar('train_count_error', count_error.item(), global_step=global_step)

        print("Step %d, loss=%f, count error=%f" % (global_step,loss.item(),count_error.item()))

        global_step += 1
    
    mean_test_error = 0
    model.eval()
    for images, paths in tqdm(loader_test):
        images = images.to(device)
        targets = torch.tensor([metadata['count'][os.path.split(path)[-1]] for path in paths]) # B
        targets = targets.float().to(device)

        # forward pass:
        output = model(images) # B x 1 x 9 x 9 (analogous to a heatmap)
        preds = output.sum(dim=[1,2,3]) # predicted cell counts (vector of length B)

        # logging:
        #error = torch.abs(preds - targets).sum().data
        #squared_error = ((preds - targets)*(preds - targets)).sum().data

        #runnning_mae += error
        #runnning_mse += squared_error

        loss = torch.mean((preds - targets)**2)
        count_error = torch.abs(preds - targets).mean()
        mean_test_error += count_error
        writer.add_scalar('test_loss', loss.item(), global_step=global_step)
        writer.add_scalar('test_count_error', count_error.item(), global_step=global_step)
        
        global_step += 1
        #store_MAE = 0
        #store_MSE = 0

        mean_test_error = mean_test_error / len(loader_test)
        #store_MAE += mean_test_error
        MAE_for_all_epochs = np.append(MAE_for_all_epochs, mean_test_error)

        mse = math.sqrt(loss / len(loader_test))
        #store_MSE +=mse
        MSE_for_all_epochs = np.append(MSE_for_all_epochs, mse)

        print("Test count error: %f" % mean_test_error)
        print("MSE: %f" % mse)

    if mean_test_error < best_test_error:
        best_test_error = mean_test_error
        torch.save({'state_dict':model.state_dict(),
                    'optimizer_state_dict':optimizer.state_dict(),
                    'globalStep':global_step,
                    'train_paths':dataset_train.files,
                    'test_paths':dataset_test.files},checkpoint_path)

print("MAE Total: %f" % store_MAE)
print("MSE Total: %f" % store_MSE)
model_mae= MAE_for_all_epochs / epoch
model_mse= MSE_for_all_epochs / epoch
print("Model MAE: %f" % model_mae)
print("Model MSE: %f" % model_mse)

Tags: python, python-3.x, pytorch

Solution


np.append() will work for your case like this:

# outside the epochs loop
MAE_for_all_epochs = []

# inside the epochs loop
# replace store_MAE with the relevant per-epoch variable
MAE_for_all_epochs = np.append(MAE_for_all_epochs, store_MAE)

Edit: a toy example, based on the usage above:

import numpy as np
all_var = []

for e in range(1, 10):
    var1 = np.random.random(1)
    all_var = np.append(all_var, var1)
    
print(all_var)
# output : [0.07660848 0.46824825 0.09432051 0.79462902 0.97798061 0.67299183 0.50996432 0.13084029 0.95100381]
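Applied to the loop in the question, the key point is to accumulate the error over the test batches and call np.append only once per epoch, after the inner test loop has finished (in the question's code the averaging and appending happen inside the batch loop). Below is a minimal sketch, assuming the names from the question (model, device, metadata, loader_test, tqdm, torch, os) are already defined; moving the averaging out of the inner loop is a restructuring of the question's code, not something stated explicitly in the answer above.

import numpy as np

MAE_for_all_epochs = []
MSE_for_all_epochs = []

for epoch in range(4):
    # ... training loop exactly as in the question ...

    model.eval()
    running_mae = 0.0
    running_mse = 0.0
    with torch.no_grad():  # no gradients needed during evaluation
        for images, paths in tqdm(loader_test):
            images = images.to(device)
            targets = torch.tensor(
                [metadata['count'][os.path.split(path)[-1]] for path in paths]
            ).float().to(device)

            preds = model(images).sum(dim=[1, 2, 3])  # predicted counts, length B
            running_mae += torch.abs(preds - targets).mean().item()
            running_mse += torch.mean((preds - targets) ** 2).item()

    # average over the test batches *after* the inner loop, once per epoch
    epoch_mae = running_mae / len(loader_test)
    epoch_mse = running_mse / len(loader_test)

    # append exactly one value per epoch, as in the answer above
    MAE_for_all_epochs = np.append(MAE_for_all_epochs, epoch_mae)
    MSE_for_all_epochs = np.append(MSE_for_all_epochs, epoch_mse)

    print("Epoch %d: test MAE=%f, test MSE=%f" % (epoch, epoch_mae, epoch_mse))

# every epoch's value is now kept; the overall figures are just their means
print("MAE per epoch:", MAE_for_all_epochs)
print("MSE per epoch:", MSE_for_all_epochs)
print("Model MAE: %f" % np.mean(MAE_for_all_epochs))
print("Model MSE: %f" % np.mean(MSE_for_all_epochs))

Dividing by len(loader_test) gives the mean of the per-batch errors; if the last batch is smaller than the others, summing per-sample errors and dividing by the number of test samples would be slightly more accurate.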
