I want to optimize the hyperparameters of my NN - implementing tune does not work

Problem Description

So, as my title says: I want to optimize my hyperparameters. I'm a newbie in the NN world. I built my NN with PyTorch, and for some reason implementing such a tuner is not easy for me.

Here is my NN:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MyNN(nn.Module):
    def __init__(self):
        super(MyNN, self).__init__()
        in_features = feature_count  # number of input features (comes from the dataset) | MORE FEATURES, THEN MORE
        out_features = 5  # MORE TRICKS, MORE OUTPUT

        hidden_layer_units = [320, 160]  # neurons of the 2 hidden layers -> adjustable
        dropout = 0.4  # countermeasure against overfitting = percentage of neurons that get switched off,
        # so that no co-dependencies form

        # bias = shift in the "coordinate system along the y-axis", "away from the origin"
        # structure of the NN
        self.lin1 = nn.Linear(in_features, hidden_layer_units[0], bias=True)
        self.lin2 = nn.Linear(hidden_layer_units[0], hidden_layer_units[1], bias=True)
        self.lin3 = nn.Linear(hidden_layer_units[1], out_features, bias=True)
        self.dropout = nn.Dropout(p=dropout, inplace=True)
        self.ReLU = nn.ReLU()  # activation function ReLU = rectified linear function | commonly used

        # normalization of the data
        self.batch_norm = nn.BatchNorm1d(in_features)
        self.batch_norm1 = nn.BatchNorm1d(hidden_layer_units[0])
        self.batch_norm2 = nn.BatchNorm1d(hidden_layer_units[1])

    def forward(self, x):
        x = self.batch_norm(x)
        x = self.dropout(self.ReLU(self.lin1(x)))
        x = self.batch_norm1(x)
        x = self.dropout(self.ReLU(self.lin2(x)))
        x = self.batch_norm2(x)
        x = self.lin3(x)

        return F.log_softmax(x, dim=1)
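Note that the hyperparameters above (the hidden layer sizes and the dropout rate) are hard-coded inside `__init__`, so no search tool can vary them; for tuning they would have to become constructor arguments. A minimal sketch of such a parameterized variant (hypothetical name, with the original values as defaults and the forward pass unchanged):

class MyNNConfigurable(nn.Module):
    # hypothetical parameterized version of MyNN: the values to tune are arguments
    def __init__(self, hidden_layer_units=(320, 160), dropout=0.4):
        super().__init__()
        in_features = feature_count  # still taken from the dataset, as above
        out_features = 5

        self.lin1 = nn.Linear(in_features, hidden_layer_units[0], bias=True)
        self.lin2 = nn.Linear(hidden_layer_units[0], hidden_layer_units[1], bias=True)
        self.lin3 = nn.Linear(hidden_layer_units[1], out_features, bias=True)
        self.dropout = nn.Dropout(p=dropout)
        self.ReLU = nn.ReLU()
        self.batch_norm = nn.BatchNorm1d(in_features)
        self.batch_norm1 = nn.BatchNorm1d(hidden_layer_units[0])
        self.batch_norm2 = nn.BatchNorm1d(hidden_layer_units[1])

    forward = MyNN.forward  # reuse the unchanged forward pass from above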

And further:

import copy
import numpy as np
from tqdm import tqdm

model = MyNN()
device = "cuda" if torch.cuda.is_available() else "cpu"  # try to use the GPU instead of the CPU; cuda = GPU
print(device)  # note: the model and the tensors are never actually moved to `device` below

optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)  # learning rate 0.001

best_model = None
best_val = None
best_epoch = None
num_epochs = 3_500  # training epochs

train_loss_list = []
val_loss_list = []
acc_list = []

for epoch in tqdm(range(num_epochs), leave=False):

    # TRAIN
    model.train()
    # Forward pass: compute predicted y by passing x to the model
    y_pred = model(X_train_tensor)

    # Compute the loss
    loss = F.nll_loss(y_pred, y_train_tensor)  # loss function | nll = negative log likelihood

    # Zero gradients, perform a backward pass, and update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # TEST
    model.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        y_pred = model(X_test_tensor)
        val_loss = F.nll_loss(y_pred, y_test_tensor)  # validation loss

    # keep a snapshot of the best model so far; `best_model = model` would only store
    # a reference, so the "best" model would silently keep training along with `model`
    if best_val is None or val_loss < best_val:
        best_model = copy.deepcopy(model)
        best_val = val_loss
        best_epoch = epoch

    y_numpy = y_pred.detach().numpy()
    y_numpy = np.argmax(y_numpy, axis=1)
    y_acc = sum(y_numpy == y_test_tensor.detach().numpy()) / len(y_numpy) * 100
    y_acc = int(y_acc)

    train_loss_list.append(loss.item())
    val_loss_list.append(val_loss.item())
    acc_list.append(y_acc)


best_model.eval()
y_pred = best_model(X_test_tensor)  # final predictions, taken from the best snapshot
y_numpy = y_pred.detach().numpy()
y_numpy = np.argmax(y_numpy, axis=1)
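The three lists collected in the loop are not used afterwards; a small sketch to visualize them, assuming matplotlib is installed:

import matplotlib.pyplot as plt

# training vs. validation loss per epoch, and test accuracy per epoch
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(train_loss_list, label="train loss")
ax1.plot(val_loss_list, label="validation loss")
ax1.set_xlabel("epoch")
ax1.legend()
ax2.plot(acc_list)
ax2.set_xlabel("epoch")
ax2.set_ylabel("accuracy [%]")
plt.tight_layout()
plt.show()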

Afterwards I tried to implement the optimization:

import torch.optim as optim
from ray import tune
from ray.tune.examples.mnist_pytorch import get_data_loaders, ConvNet, train, test

analysis = tune.run(
    MyNN, config={"lr": tune.grid_search([0.001, 0.01, 0.1])})

print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))

Which results in the error:

TypeError: ('Second argument must be convertable to Trainable', <class '__main__.MyNN'>)
Other options:
- Try reproducing the issue by calling `pickle.dumps(trainable)`.
- If the error is typing-related, try removing the type annotations and try again.

At this point I'm a bit lost. I have now tried three ways to implement this, and none of them worked. Please send some huulp :P If you need more information, let me know.

Have a great day, Clemens

Tags: gridsearchcv

Solution

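The TypeError says exactly what is wrong: the first argument of `tune.run` must be a Trainable, i.e. either a function that accepts a `config` dict or a subclass of `tune.Trainable`, not an `nn.Module` like `MyNN`. A minimal sketch of a function trainable that wraps the training loop from the question, assuming the Ray 1.x function API (`tune.report`) and that `X_train_tensor`, `y_train_tensor`, `X_test_tensor`, `y_test_tensor` and `num_epochs` are in scope (for a real run the data loading belongs inside the function, because trials run in separate worker processes):

from ray import tune

def train_mynn(config):
    # hypothetical function trainable: Ray Tune calls it once per trial and
    # passes the sampled hyperparameters in `config`
    model = MyNN()
    optimizer = torch.optim.SGD(model.parameters(), lr=config["lr"])

    for epoch in range(num_epochs):
        model.train()
        y_pred = model(X_train_tensor)
        loss = F.nll_loss(y_pred, y_train_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        model.eval()
        with torch.no_grad():
            y_pred = model(X_test_tensor)
        accuracy = (y_pred.argmax(dim=1) == y_test_tensor).float().mean().item()

        # report the metric back to Tune; this is what get_best_config() reads later
        tune.report(mean_accuracy=accuracy)

analysis = tune.run(
    train_mynn,  # a plain function taking `config` is a valid Trainable
    config={"lr": tune.grid_search([0.001, 0.01, 0.1])},
)

print("Best config:", analysis.get_best_config(metric="mean_accuracy", mode="max"))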
