首页 > 解决方案 > Pytorch 预测变得相同

问题描述

我开发了一个分类深度学习模型,我在训练时得到了这个结果:

tensor([[ 0.0141, -0.0279,  0.0623],
        [ 0.0141, -0.0279,  0.0623],
        [ 0.0141, -0.0279,  0.0623],
        [ 0.0141, -0.0279,  0.0623]], grad_fn=<AddmmBackward>) tensor([1., 0., 1., 2.])
epoch = 1/10, step = 100/595, loss = 1.1103
tensor([[ 0.0294, -0.0161,  0.0353],
        [ 0.0294, -0.0161,  0.0353],
        [ 0.0294, -0.0161,  0.0353],
        [ 0.0294, -0.0161,  0.0353]], grad_fn=<AddmmBackward>) tensor([0., 0., 0., 1.])
epoch = 1/10, step = 200/595, loss = 1.0971
tensor([[ 0.0315, -0.0199,  0.0369],
        [ 0.0315, -0.0199,  0.0369],
        [ 0.0315, -0.0199,  0.0369],
        [ 0.0315, -0.0199,  0.0369]], grad_fn=<AddmmBackward>) tensor([0., 0., 2., 1.])
epoch = 1/10, step = 300/595, loss = 1.0951
tensor([[ 0.0388, -0.0036,  0.0134],
        [ 0.0388, -0.0036,  0.0134],
        [ 0.0388, -0.0036,  0.0134],
        [ 0.0388, -0.0036,  0.0134]], grad_fn=<AddmmBackward>) tensor([1., 1., 1., 1.])
epoch = 1/10, step = 400/595, loss = 1.1186
tensor([[ 0.0476, -0.0057,  0.0066],
        [ 0.0476, -0.0057,  0.0066],
        [ 0.0476, -0.0057,  0.0066],
        [ 0.0476, -0.0057,  0.0066]], grad_fn=<AddmmBackward>) tensor([1., 1., 2., 0.])
epoch = 1/10, step = 500/595, loss = 1.1043
tensor([[-0.0150,  0.0246,  0.0390],
        [-0.0150,  0.0246,  0.0390],
        [-0.0150,  0.0246,  0.0390],
        [-0.0150,  0.0246,  0.0390]], grad_fn=<AddmmBackward>) tensor([2., 1., 2., 0.])
epoch = 2/10, step = 100/595, loss = 1.0932
tensor([[0.0168, 0.0212, 0.0106],
        [0.0168, 0.0212, 0.0106],
        [0.0168, 0.0212, 0.0106],
        [0.0168, 0.0212, 0.0106]], grad_fn=<AddmmBackward>) tensor([0., 1., 0., 0.])
epoch = 2/10, step = 200/595, loss = 1.0969
tensor([[ 0.0448, -0.0110,  0.0148],
        [ 0.0448, -0.0110,  0.0148],
        [ 0.0448, -0.0110,  0.0148],
        [ 0.0448, -0.0110,  0.0148]], grad_fn=<AddmmBackward>) tensor([0., 2., 2., 1.])
epoch = 2/10, step = 300/595, loss = 1.0992
tensor([[0.0310, 0.0007, 0.0168],
        [0.0310, 0.0007, 0.0168],
        [0.0310, 0.0007, 0.0168],
        [0.0310, 0.0007, 0.0168]], grad_fn=<AddmmBackward>) tensor([1., 1., 0., 0.])
epoch = 2/10, step = 400/595, loss = 1.0990
tensor([[ 0.0336,  0.0241, -0.0092],
        [ 0.0336,  0.0241, -0.0092],
        [ 0.0336,  0.0241, -0.0092],
        [ 0.0336,  0.0241, -0.0092]], grad_fn=<AddmmBackward>) tensor([0., 1., 1., 1.])
epoch = 2/10, step = 500/595, loss = 1.0885
tensor([[0.0288, 0.0057, 0.0140],
        [0.0288, 0.0057, 0.0140],
        [0.0288, 0.0057, 0.0140],
        [0.0288, 0.0057, 0.0140]], grad_fn=<AddmmBackward>) tensor([1., 0., 1., 1.])
epoch = 3/10, step = 100/595, loss = 1.1033
tensor([[0.0243, 0.0002, 0.0240],
        [0.0243, 0.0002, 0.0240],
        [0.0243, 0.0002, 0.0240],
        [0.0243, 0.0002, 0.0240]], grad_fn=<AddmmBackward>) tensor([2., 2., 1., 1.])
epoch = 3/10, step = 200/595, loss = 1.1027
tensor([[ 0.0512,  0.0081, -0.0107],
        [ 0.0512,  0.0081, -0.0107],
        [ 0.0512,  0.0081, -0.0107],
        [ 0.0512,  0.0081, -0.0107]], grad_fn=<AddmmBackward>) tensor([2., 0., 0., 1.])
epoch = 3/10, step = 300/595, loss = 1.0902
tensor([[ 0.0374, -0.0038,  0.0150],
        [ 0.0374, -0.0038,  0.0150],
        [ 0.0374, -0.0038,  0.0150],
        [ 0.0374, -0.0038,  0.0150]], grad_fn=<AddmmBackward>) tensor([0., 2., 0., 0.])
epoch = 3/10, step = 400/595, loss = 1.0831
tensor([[ 0.0325,  0.0225, -0.0065],
        [ 0.0325,  0.0225, -0.0065],
        [ 0.0325,  0.0225, -0.0065],
        [ 0.0325,  0.0225, -0.0065]], grad_fn=<AddmmBackward>) tensor([1., 2., 2., 1.])
epoch = 3/10, step = 500/595, loss = 1.1069
tensor([[0.0210, 0.0100, 0.0175],
        [0.0210, 0.0100, 0.0175],
        [0.0210, 0.0100, 0.0175],
        [0.0210, 0.0100, 0.0175]], grad_fn=<AddmmBackward>) tensor([0., 1., 1., 2.])
epoch = 4/10, step = 100/595, loss = 1.1002
tensor([[0.0189, 0.0051, 0.0245],
        [0.0189, 0.0051, 0.0245],
        [0.0189, 0.0051, 0.0245],
        [0.0189, 0.0051, 0.0245]], grad_fn=<AddmmBackward>) tensor([0., 2., 0., 2.])
epoch = 4/10, step = 200/595, loss = 1.0931
tensor([[0.0184, 0.0076, 0.0226],
        [0.0184, 0.0076, 0.0226],
        [0.0184, 0.0076, 0.0226],
        [0.0184, 0.0076, 0.0226]], grad_fn=<AddmmBackward>) tensor([1., 2., 1., 0.])
epoch = 4/10, step = 300/595, loss = 1.1008
tensor([[-0.0005,  0.0347,  0.0144],
        [-0.0005,  0.0347,  0.0144],
        [-0.0005,  0.0347,  0.0144],
        [-0.0005,  0.0347,  0.0144]], grad_fn=<AddmmBackward>) tensor([1., 1., 0., 1.])
epoch = 4/10, step = 400/595, loss = 1.0890
tensor([[0.0122, 0.0191, 0.0173],
        [0.0122, 0.0191, 0.0173],
        [0.0122, 0.0191, 0.0173],
        [0.0122, 0.0191, 0.0173]], grad_fn=<AddmmBackward>) tensor([2., 1., 2., 2.])
epoch = 4/10, step = 500/595, loss = 1.0971
tensor([[ 0.0323, -0.0013,  0.0175],
        [ 0.0323, -0.0013,  0.0175],
        [ 0.0323, -0.0013,  0.0175],
        [ 0.0323, -0.0013,  0.0175]], grad_fn=<AddmmBackward>) tensor([2., 0., 0., 2.])
epoch = 5/10, step = 100/595, loss = 1.0900
tensor([[ 0.0335,  0.0183, -0.0032],
        [ 0.0335,  0.0183, -0.0032],
        [ 0.0335,  0.0183, -0.0032],
        [ 0.0335,  0.0183, -0.0032]], grad_fn=<AddmmBackward>) tensor([1., 2., 1., 2.])
epoch = 5/10, step = 200/595, loss = 1.1074
tensor([[0.0239, 0.0134, 0.0112],
        [0.0239, 0.0134, 0.0112],
        [0.0239, 0.0134, 0.0112],
        [0.0239, 0.0134, 0.0112]], grad_fn=<AddmmBackward>) tensor([1., 2., 0., 2.])
epoch = 5/10, step = 300/595, loss = 1.0999
tensor([[ 0.0394,  0.0244, -0.0153],
        [ 0.0394,  0.0244, -0.0153],
        [ 0.0394,  0.0244, -0.0153],
        [ 0.0394,  0.0244, -0.0153]], grad_fn=<AddmmBackward>) tensor([1., 1., 2., 2.])
epoch = 5/10, step = 400/595, loss = 1.1105
tensor([[ 0.0529,  0.0281, -0.0324],
        [ 0.0529,  0.0281, -0.0324],
        [ 0.0529,  0.0281, -0.0324],
        [ 0.0529,  0.0281, -0.0324]], grad_fn=<AddmmBackward>) tensor([2., 2., 2., 2.])
epoch = 5/10, step = 500/595, loss = 1.1478
tensor([[0.0104, 0.0371, 0.0011],
        [0.0104, 0.0371, 0.0011],
        [0.0104, 0.0371, 0.0011],
        [0.0104, 0.0371, 0.0011]], grad_fn=<AddmmBackward>) tensor([2., 1., 1., 1.])
epoch = 6/10, step = 100/595, loss = 1.0868
tensor([[0.0038, 0.0232, 0.0215],
        [0.0038, 0.0232, 0.0215],
        [0.0038, 0.0232, 0.0215],
        [0.0038, 0.0232, 0.0215]], grad_fn=<AddmmBackward>) tensor([2., 0., 1., 2.])
epoch = 6/10, step = 200/595, loss = 1.0973
tensor([[0.0094, 0.0273, 0.0119],
        [0.0094, 0.0273, 0.0119],
        [0.0094, 0.0273, 0.0119],
        [0.0094, 0.0273, 0.0119]], grad_fn=<AddmmBackward>) tensor([1., 0., 0., 2.])
epoch = 6/10, step = 300/595, loss = 1.1003
tensor([[ 0.0397, -0.0106,  0.0194],
        [ 0.0397, -0.0106,  0.0194],
        [ 0.0397, -0.0106,  0.0194],
        [ 0.0397, -0.0106,  0.0194]], grad_fn=<AddmmBackward>) tensor([0., 2., 2., 1.])
epoch = 6/10, step = 400/595, loss = 1.0980
tensor([[ 0.0321, -0.0099,  0.0264],
        [ 0.0321, -0.0099,  0.0264],
        [ 0.0321, -0.0099,  0.0264],
        [ 0.0321, -0.0099,  0.0264]], grad_fn=<AddmmBackward>) tensor([1., 1., 2., 2.])
epoch = 6/10, step = 500/595, loss = 1.1067
tensor([[ 0.0487, -0.0066,  0.0064],
        [ 0.0487, -0.0066,  0.0064],
        [ 0.0487, -0.0066,  0.0064],
        [ 0.0487, -0.0066,  0.0064]], grad_fn=<AddmmBackward>) tensor([2., 0., 0., 1.])
epoch = 7/10, step = 100/595, loss = 1.0908
tensor([[ 0.0628,  0.0229, -0.0372],
        [ 0.0628,  0.0229, -0.0372],
        [ 0.0628,  0.0229, -0.0372],
        [ 0.0628,  0.0229, -0.0372]], grad_fn=<AddmmBackward>) tensor([2., 2., 0., 0.])
epoch = 7/10, step = 200/595, loss = 1.1028
tensor([[ 0.0379,  0.0228, -0.0121],
        [ 0.0379,  0.0228, -0.0121],
        [ 0.0379,  0.0228, -0.0121],
        [ 0.0379,  0.0228, -0.0121]], grad_fn=<AddmmBackward>) tensor([0., 0., 0., 0.])
epoch = 7/10, step = 300/595, loss = 1.0771
tensor([[ 0.0359,  0.0242, -0.0116],
        [ 0.0359,  0.0242, -0.0116],
        [ 0.0359,  0.0242, -0.0116],
        [ 0.0359,  0.0242, -0.0116]], grad_fn=<AddmmBackward>) tensor([1., 1., 0., 1.])
epoch = 7/10, step = 400/595, loss = 1.0879
tensor([[ 0.0295,  0.0306, -0.0115],
        [ 0.0295,  0.0306, -0.0115],
        [ 0.0295,  0.0306, -0.0115],
        [ 0.0295,  0.0306, -0.0115]], grad_fn=<AddmmBackward>) tensor([1., 0., 2., 1.])
epoch = 7/10, step = 500/595, loss = 1.0952
tensor([[0.0272, 0.0016, 0.0197],
        [0.0272, 0.0016, 0.0197],
        [0.0272, 0.0016, 0.0197],
        [0.0272, 0.0016, 0.0197]], grad_fn=<AddmmBackward>) tensor([2., 1., 2., 0.])
epoch = 8/10, step = 100/595, loss = 1.0978
tensor([[0.0273, 0.0187, 0.0025],
        [0.0273, 0.0187, 0.0025],
        [0.0273, 0.0187, 0.0025],
        [0.0273, 0.0187, 0.0025]], grad_fn=<AddmmBackward>) tensor([1., 2., 1., 0.])
epoch = 8/10, step = 200/595, loss = 1.0980
tensor([[ 0.0573,  0.0246, -0.0333],
        [ 0.0573,  0.0246, -0.0333],
        [ 0.0573,  0.0246, -0.0333],
        [ 0.0573,  0.0246, -0.0333]], grad_fn=<AddmmBackward>) tensor([1., 0., 0., 0.])
epoch = 8/10, step = 300/595, loss = 1.0664
tensor([[ 0.0169,  0.0369, -0.0052],
        [ 0.0169,  0.0369, -0.0052],
        [ 0.0169,  0.0369, -0.0052],
        [ 0.0169,  0.0369, -0.0052]], grad_fn=<AddmmBackward>) tensor([2., 2., 1., 2.])
epoch = 8/10, step = 400/595, loss = 1.1096
tensor([[0.0025, 0.0081, 0.0379],
        [0.0025, 0.0081, 0.0379],
        [0.0025, 0.0081, 0.0379],
        [0.0025, 0.0081, 0.0379]], grad_fn=<AddmmBackward>) tensor([0., 1., 1., 0.])
epoch = 8/10, step = 500/595, loss = 1.1096
tensor([[-0.0015, -0.0033,  0.0534],
        [-0.0015, -0.0033,  0.0534],
        [-0.0015, -0.0033,  0.0534],
        [-0.0015, -0.0033,  0.0534]], grad_fn=<AddmmBackward>) tensor([1., 0., 0., 1.])
epoch = 9/10, step = 100/595, loss = 1.1176
tensor([[0.0130, 0.0270, 0.0085],
        [0.0130, 0.0270, 0.0085],
        [0.0130, 0.0270, 0.0085],
        [0.0130, 0.0270, 0.0085]], grad_fn=<AddmmBackward>) tensor([0., 0., 0., 1.])
epoch = 9/10, step = 200/595, loss = 1.0983
tensor([[ 0.0625,  0.0357, -0.0496],
        [ 0.0625,  0.0357, -0.0496],
        [ 0.0625,  0.0357, -0.0496],
        [ 0.0625,  0.0357, -0.0496]], grad_fn=<AddmmBackward>) tensor([1., 2., 0., 1.])
epoch = 9/10, step = 300/595, loss = 1.0949
tensor([[ 0.0622, -0.0005, -0.0132],
        [ 0.0622, -0.0005, -0.0132],
        [ 0.0622, -0.0005, -0.0132],
        [ 0.0622, -0.0005, -0.0132]], grad_fn=<AddmmBackward>) tensor([1., 0., 0., 2.])
epoch = 9/10, step = 400/595, loss = 1.0876
tensor([[ 0.0217, -0.0025,  0.0293],
        [ 0.0217, -0.0025,  0.0293],
        [ 0.0217, -0.0025,  0.0293],
        [ 0.0217, -0.0025,  0.0293]], grad_fn=<AddmmBackward>) tensor([1., 0., 2., 0.])
epoch = 9/10, step = 500/595, loss = 1.0973
tensor([[ 0.0379, -0.0200,  0.0306],
        [ 0.0379, -0.0200,  0.0306],
        [ 0.0379, -0.0200,  0.0306],
        [ 0.0379, -0.0200,  0.0306]], grad_fn=<AddmmBackward>) tensor([1., 0., 2., 1.])
epoch = 10/10, step = 100/595, loss = 1.1080
tensor([[ 0.0466,  0.0045, -0.0025],
        [ 0.0466,  0.0045, -0.0025],
        [ 0.0466,  0.0045, -0.0025],
        [ 0.0466,  0.0045, -0.0025]], grad_fn=<AddmmBackward>) tensor([2., 2., 1., 1.])
epoch = 10/10, step = 200/595, loss = 1.1141
tensor([[0.0321, 0.0124, 0.0040],
        [0.0321, 0.0124, 0.0040],
        [0.0321, 0.0124, 0.0040],
        [0.0321, 0.0124, 0.0040]], grad_fn=<AddmmBackward>) tensor([2., 0., 1., 2.])
epoch = 10/10, step = 300/595, loss = 1.1017
tensor([[0.0204, 0.0120, 0.0162],
        [0.0204, 0.0120, 0.0162],
        [0.0204, 0.0120, 0.0162],
        [0.0204, 0.0120, 0.0162]], grad_fn=<AddmmBackward>) tensor([1., 2., 0., 0.])
epoch = 10/10, step = 400/595, loss = 1.0976
tensor([[0.0092, 0.0228, 0.0165],
        [0.0092, 0.0228, 0.0165],
        [0.0092, 0.0228, 0.0165],
        [0.0092, 0.0228, 0.0165]], grad_fn=<AddmmBackward>) tensor([1., 2., 0., 0.])
epoch = 10/10, step = 500/595, loss = 1.1004
acc = 34.22818791946309
Accuracy of 0: 0.0, correct predictions = 0
Accuracy of 1: 0.0, correct predictions = 0
Accuracy of 2: 100.0, correct predictions = 204

Process finished with exit code 0

无论输入是什么,它都会给我相同的预测

这是模型:

import torch
import torchvision
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from torchvision.transforms import transforms
import torch.nn as nn
import pandas as pd
import numpy as np

# Hyperparameters for the classifier and training run.
num_classes = 3       # three target classes (labels 0, 1, 2)
batch_size = 4        # mini-batch size for both train and test loaders
hidden_size = 100     # width of the single hidden layer
input_size = 196      # number of feature columns (CSV columns 7..202)
learning_rate = 0.01  # SGD step size
num_epochs = 10       # full passes over the training split


class price_dataset(Dataset):
    """In-memory dataset of pricing rows loaded from a CSV file.

    Each item is a ``(features, label)`` pair: features are float32
    values taken from a contiguous column slice, and the label is a
    single float32 class index.

    The CSV path and column layout are parameters (with defaults that
    match the original hard-coded values), so the same class works for
    other files/layouts without editing the source.
    """

    def __init__(self, csv_path='data_balanced.csv', label_col=6,
                 feature_start=7, feature_end=203):
        """Load the whole CSV into memory as torch tensors.

        Args:
            csv_path: path of the CSV file to read.
            label_col: index of the column holding the class label.
            feature_start: first feature column (inclusive).
            feature_end: last feature column (exclusive).
        """
        xy = pd.read_csv(csv_path)
        self.n_samples = xy.shape[0]

        xy = xy.to_numpy()
        # Feature matrix, shape (n_samples, feature_end - feature_start).
        self.x_data = torch.from_numpy(
            xy[:, feature_start:feature_end].astype(np.float32))
        # Labels stay float32 here; the training loop casts to long
        # before handing them to CrossEntropyLoss.
        self.y_data = torch.from_numpy(xy[:, label_col].astype(np.float32))

    def __getitem__(self, index):
        """Return the (features, label) pair at ``index``."""
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        """Number of rows loaded from the CSV."""
        return self.n_samples


# Load the full CSV-backed dataset into memory.
dataset = price_dataset()

# NOTE(review): train_test_split here iterates the Dataset and returns two
# plain lists of (features, label) tuples, not Dataset objects — DataLoader
# accepts that, but the split materializes every row; confirm this is intended.
train_dataset, test_dataset = train_test_split(dataset, test_size=0.20, random_state=0)

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

# Test loader keeps the original order (shuffle=False) for evaluation.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)



# print(example_data)

class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.input_size = input_size
        self.l1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(hidden_size, num_classes)

    def forward(self, inputs):
        out = self.l1(inputs)
        out = self.relu(out)
        out = self.l2(out)
        return out


# Model, loss, and optimizer.
model = NeuralNet(input_size, hidden_size, num_classes)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# NOTE(review): the logged logits are identical for every row of a batch,
# which typically means the raw CSV features dominate the learned weights —
# presumably the inputs are unnormalized; consider standardizing the feature
# columns (e.g. (x - mean) / std) before training. TODO confirm against data.
n_total_steps = len(train_loader)
for epoch in range(num_epochs):
    # Renamed `input` -> `inputs`: the old name shadowed the builtin input().
    for i, (inputs, labels) in enumerate(train_loader):
        # Forward pass: raw logits, shape (batch, num_classes).
        y_pred = model(inputs)

        # CrossEntropyLoss requires integer class indices, hence .long().
        loss = criterion(y_pred, labels.long())

        # Standard backward pass: clear grads, backprop, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print(y_pred, labels)
            print(f'epoch = {epoch + 1}/{num_epochs}, step = {i + 1}/{n_total_steps}, loss = {loss.item():.4f}')

# ---- Evaluation on the held-out test split ----
with torch.no_grad():
    n_correct = 0
    n_samples = 0
    n_class_correct = [0 for i in range(num_classes)]
    n_class_samples = [0 for i in range(num_classes)]

    for inputs, labels in test_loader:
        labels = torch.flatten(labels)
        y_pred = model(inputs)

        # Predicted class = argmax over the logits of each row.
        _, prediction = torch.max(y_pred.data, 1)

        n_samples += labels.shape[0]
        # BUG FIX: count every correct prediction. The old filter
        # `pred != 0 and label != 0` silently excluded class 0 from the
        # overall accuracy, making `acc` meaningless.
        n_correct += (prediction == labels).sum().item()

        # BUG FIX: iterate over the actual batch, not range(batch_size) —
        # the final batch can be smaller than batch_size, which previously
        # raised IndexError.
        for label, pred in zip(labels, prediction):
            if label == pred:
                n_class_correct[int(label)] += 1
            n_class_samples[int(label)] += 1

    acc = 100 * (n_correct / n_samples)
    print(f'acc = {acc}')

    for i in range(num_classes):
        # Guard against a class that never appears in the test split
        # (the old code divided by zero in that case).
        if n_class_samples[i] == 0:
            print(f'Accuracy of {i}: n/a, correct predictions = 0')
        else:
            acc = 100 * n_class_correct[i] / n_class_samples[i]
            print(f'Accuracy of {i}: {acc}, correct predictions = {n_class_correct[i]}')

模型有问题吗?我尝试过更改优化器和学习率,但没有任何改变;我也尝试过更改隐藏层大小,但同样没有解决问题。

数据链接:https://docs.google.com/spreadsheets/d/12lFXSnA_mOxRX-JBoMGyMcEEedmFU0eIEMbf5ne2a14/edit?usp=sharing

标签: python, pytorch

解决方案


推荐阅读