Can validation performance be output from the MNIST training data and test data?

Problem description

import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
import torch.nn.functional as F
# from sklearn.linear_model import SGDClassifier # Test I did
# from sklearn.model_selection import cross_val_score

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)

learning_rate = 0.001
training_epochs = 15
batch_size = 100

mnist_train = dsets.MNIST(root='MNIST_data/', # Specify download path
                          train=True, # Specify True to download as training data
                          transform=transforms.ToTensor(), # Convert to tensor
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/', # Specify download path
                         train=False, # If false is specified, download as test data
                         transform=transforms.ToTensor(), # Convert to tensor
                         download=True)

data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)

class CNN(torch.nn.Module):

    def __init__(self):
        super(CNN, self).__init__()
        # 1st layer
        # ImgIn shape=(?, 1, 28, 28)
    
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))

        # 2nd layer
 
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))

        # 7x7x64 inputs -> 10 outputs
        self.fc1 = torch.nn.Linear(7 * 7 * 64, 100, bias=True)
        self.fc2 = torch.nn.Linear(100, 10, bias=True)


    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)   # Flatten for the fully connected layer
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out

# CNN model definition
model = CNN().to(device)

criterion = torch.nn.CrossEntropyLoss().to(device)    # CrossEntropyLoss applies softmax internally.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


total_batch = len(data_loader)
print('Number of batches : {}'.format(total_batch))

def epoch_acc():
    # Evaluate on the full test set; scale to [0, 1] to match ToTensor()
    model.eval()
    with torch.no_grad():
        X_test = mnist_test.data.view(len(mnist_test), 1, 28, 28).float().to(device) / 255.
        Y_test = mnist_test.targets.to(device)

        prediction = model(X_test)
        correct_prediction = torch.argmax(prediction, 1) == Y_test
        accuracy = correct_prediction.float().mean()
    model.train()

    return accuracy.item()

for epoch in range(training_epochs):
    avg_cost = 0

    for X, Y in data_loader: # iterate over mini-batches
        # image is already size of (28x28), no reshape
        # label is not one-hot encoded
        X = X.to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        avg_cost += cost.item() / total_batch

    print('[Epoch: {:>4}] cost = {:>.9} ACC = {:.4f}'.format(epoch + 1, avg_cost, epoch_acc()))

# Evaluation only, no further learning, so torch.no_grad()
model.eval()
with torch.no_grad():
    X_test = mnist_test.data.view(len(mnist_test), 1, 28, 28).float().to(device) / 255.
    Y_test = mnist_test.targets.to(device)

    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy.item())

This code trains a CNN on the MNIST data.

If you run it for the 15 epochs, the cost and the training performance (ACC) are printed.

However, I would also like to output the validation performance here, as shown in the image below.

[image: example training log that also prints a validation accuracy for each epoch]

The image was also produced by code based on this one.

Currently, in this code, the training data is 60,000 samples and the test data is 10,000. How can I output the validation performance here?

Can I obtain the validation performance (ValACC) within the code itself?

Or should I create a new validation set and use something like cross-validation with sklearn?
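
A minimal sketch of the first option (computing ValACC within the code itself), assuming the mnist_train, batch_size, model, and device defined above; the names train_set, val_set, val_loader, and val_acc are illustrative, not part of the original code:

from torch.utils.data import DataLoader, random_split

# Hold out 10,000 of the 60,000 training images for validation
train_set, val_set = random_split(mnist_train, [50000, 10000])
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size)

def val_acc():
    # Accuracy on the held-out validation set
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, Y in val_loader:
            X, Y = X.to(device), Y.to(device)
            correct += (model(X).argmax(dim=1) == Y).sum().item()
            total += Y.size(0)
    model.train()
    return correct / total

Calling val_acc() once per epoch would print a ValACC alongside the cost, without involving sklearn.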


2021-04-26: This code has been modified by adding the code you uploaded.

import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
import torch.nn.functional as F
import numpy as np
from torch.utils.data import (
    DataLoader,
    random_split,
    SubsetRandomSampler,
    WeightedRandomSampler,
)

device = "cuda" if torch.cuda.is_available() else "cpu"
print(device)

learning_rate = 0.001
training_epochs = 15
batch_size = 100

mnist_train = dsets.MNIST(
    root="MNIST_data/",  # Specify download path
    train=True,  # Specify True to download as training data
    transform=transforms.ToTensor(),  # Convert to tensor
    download=True,
)

mnist_test = dsets.MNIST(
    root="MNIST_data/",  # Specify download path
    train=False,  # If false is specified, download as test data
    transform=transforms.ToTensor(),  # Convert to tensor
    download=True,
)

data_loader = torch.utils.data.DataLoader(
    dataset=mnist_train, batch_size=batch_size, shuffle=True, drop_last=True
)


class CNN(torch.nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # 1st layer
        # ImgIn shape=(?, 1, 28, 28)

        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # 2nd layer

        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # 7x7x64 inputs -> 10 outputs
        self.fc1 = torch.nn.Linear(7 * 7 * 64, 100, bias=True)
        self.fc2 = torch.nn.Linear(100, 10, bias=True)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # Flatten for the fully connected layer
        out = F.relu(self.fc1(out))
        out = self.fc2(out)
        return out


# CNN model definition
model = CNN().to(device)

criterion = torch.nn.CrossEntropyLoss().to(device)  # CrossEntropyLoss applies softmax internally.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


total_batch = len(data_loader)
print("Number of batches : {}".format(total_batch))

valid_size = 0.2
num_train = len(mnist_train)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = torch.utils.data.DataLoader(
    mnist_train, batch_size=64, sampler=train_sampler, num_workers=2
)
valid_loader = torch.utils.data.DataLoader(
    mnist_train, batch_size=30, sampler=valid_sampler, num_workers=2
)

# Track the smallest validation loss seen so far
valid_loss_min = np.inf
# A scheduler is stepped once per epoch below; StepLR is one example
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

for epoch in range(1, training_epochs + 1):
    train_loss = 0.0
    valid_loss = 0.0

    # train the model
    model.train()
    for i, (data, target) in enumerate(train_loader):
        # move tensors to the GPU if CUDA is available
        data, target = data.to(device), target.to(device)
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predictions by passing inputs
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute the gradient of the loss with respect to the parameters
        loss.backward()
        # update parameters with a single optimizer step
        optimizer.step()
        # update the running training loss
        train_loss += loss.item() * data.size(0)

    # validate the model
    model.eval()
    for batch_idx, (data, target) in enumerate(valid_loader):
        # move tensors to the GPU
        data, target = data.to(device), target.to(device)
        # forward pass: compute the validation predictions
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update the running validation loss
        valid_loss += loss.item() * data.size(0)

    # calculate average losses
    train_losses = train_loss / len(train_loader.sampler)
    valid_losses = valid_loss / len(valid_loader.sampler)
    scheduler.step()
    # print the training and validation loss statistics
    print(
        "Epoch: {} \t Training Loss: {:.3f} \t Validation Loss: {:.3f}".format(
            epoch, train_losses, valid_losses
        )
    )
    # save the model if the validation loss decreased
    if valid_losses <= valid_loss_min:
        print(
            "Validation loss decreased {:.4f}--->{:.4f}  Saving model...".format(
                valid_loss_min, valid_losses
            )
        )
        # save the current model
        torch.save(model.state_dict(), "model_cifer.pt")
        valid_loss_min = valid_losses
    print("Learning rate: {:.5f}".format(optimizer.state_dict()["param_groups"][0]["lr"]))

Tags: python, machine-learning, scikit-learn, pytorch, mnist

Solution


1. Split the training data into two parts, one for training and one for validation.

Obtain the training indices that will be used for validation:

valid_size = 0.2
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]

Define samplers to obtain the training and validation batches:

train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

Combine the dataset and the samplers to prepare the data loaders:

train_loader = torch.utils.data.DataLoader(train_data, batch_size=64,
                                           sampler=train_sampler, num_workers=2)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=30,
                                           sampler=valid_sampler, num_workers=2)

2. Validate the model with the validation data. The training and validation loss are printed for every epoch, and only the parameters with the lowest validation loss are saved.

valid_loss_min = float('inf')  # smallest validation loss seen so far
# A scheduler is stepped once per epoch below; StepLR is one example
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

for epoch in range(1, training_epochs + 1):
    train_loss = 0.0
    valid_loss = 0.0

    # train the model
    model.train()
    for i, (data, target) in enumerate(train_loader):
        # move tensors to the GPU if CUDA is available
        data, target = data.to(device), target.to(device)
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predictions by passing inputs
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute the gradient of the loss with respect to the parameters
        loss.backward()
        # update parameters with a single optimizer step
        optimizer.step()
        # update the running training loss
        train_loss += loss.item() * data.size(0)

    # validate the model
    model.eval()
    for batch_idx, (data, target) in enumerate(valid_loader):
        # move tensors to the GPU
        data, target = data.to(device), target.to(device)
        # forward pass: compute the validation predictions
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update the running validation loss
        valid_loss += loss.item() * data.size(0)

    # calculate average losses
    train_losses = train_loss / len(train_loader.sampler)
    valid_losses = valid_loss / len(valid_loader.sampler)
    scheduler.step()
    # print the training and validation loss statistics
    print('Epoch: {} \t Training Loss: {:.3f} \t Validation Loss: {:.3f}'.format(epoch, train_losses, valid_losses))
    # save the model if the validation loss decreased
    if valid_losses <= valid_loss_min:
        print("Validation loss decreased {:.4f}--->{:.4f}  Saving model...".format(valid_loss_min, valid_losses))
        # save the current model
        torch.save(model.state_dict(), 'model_cifer.pt')
        valid_loss_min = valid_losses
    print('Learning rate: {:.5f}'.format(optimizer.state_dict()['param_groups'][0]['lr']))
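
The question specifically asks for a validation accuracy (ValACC), while the loop above only tracks losses. A minimal extension of the validation pass that also counts correct predictions (val_correct and val_total are illustrative names, not from the original answer), run once per epoch right after the validation-loss loop:

    # Also report validation accuracy (ValACC) for the epoch
    val_correct, val_total = 0, 0
    with torch.no_grad():
        for data, target in valid_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            val_correct += (output.argmax(dim=1) == target).sum().item()
            val_total += target.size(0)
    print('ValACC: {:.4f}'.format(val_correct / val_total))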
