Keras Double DQN: average reward decreases over time and fails to converge

Problem Description

I am trying to teach a Double DQN agent to play a grid world in which a seeker (the agent) tries to collect all of the randomly spawned hiders. Every step has a path_cost of -0.1, and collecting a hider gives a reward of 1. The DQN network receives an array of shape (world_width, world_height, 1) as the state, which is a full top-down view of the environment in which empty space is encoded as 0, the seeker as 2, and the hiders as 3. The agent should then choose one of the actions left, up, right or down. An example configuration of the environment is shown in the figure below.

[Figure: grid world]
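
For reference, here is a minimal sketch of how a state of that shape could be built (the helper below is purely illustrative and is not part of the actual HNS environment):

import numpy as np

# Illustrative encoding of the top-down observation described above:
# 0 = empty cell, 2 = seeker (agent), 3 = hider; shape is (8, 8, 1).
EMPTY, SEEKER, HIDER = 0, 2, 3

def make_state(world_size=(8, 8), seeker=(0, 0), hiders=((3, 5), (6, 2))):
    grid = np.full(world_size, EMPTY, dtype=np.float64)
    grid[seeker] = SEEKER
    for pos in hiders:
        grid[pos] = HIDER
    return grid[..., np.newaxis]   # add channel axis -> (8, 8, 1)

# Actions are indices 0..3, mapping to left, up, right and down.
print(make_state().shape)          # (8, 8, 1)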

However, when training my agent the reward initially decreases as exploration decreases, so it seems fair to assume that when the agent follows the DQN network it performs worse than when it picks actions at random. Below are some examples of the reward plots I got when training with different hyperparameters (the y-axis is the total number of steps; every episode is 100 steps unless it finishes earlier).

[Figure: reward plots]

As can be seen, the agent gets worse at solving the environment, and the curve levels off roughly at the point where epsilon reaches my min_epsilon (meaning there is almost no exploration, i.e. hardly any random moves).
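
As a rough sanity check on that timing: epsilon is multiplied by epsilon_decay once per episode in my training loop, so with the values below it reaches the min_epsilon floor after roughly 229 episodes, which is about where the curves flatten out:

import math

# Episodes until 1.0 * 0.99**n drops to 0.1 (epsilon is decayed once per episode)
episodes_to_min = math.log(0.1) / math.log(0.99)
print(round(episodes_to_min))   # ~229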

I have tried different hyperparameters without any noticeable difference in the results, so I would be very grateful if someone could give me a pointer to where the problem might be.

The hyperparameters I have mainly been using are:

wandb.config.epsilon           = 1.0
wandb.config.epsilon_decay     = 0.99
wandb.config.batch_size        = 32
wandb.config.learning_rate     = 1e-3
wandb.config.gamma             = 0.8
wandb.config.min_epsilon       = 1e-1
wandb.config.buffersize        = 10000
wandb.config.epochs            = 1
wandb.config.reward_discount   = 0.01
wandb.config.episodes          = 1000

Here is my code:

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from collections import deque
from termcolor import colored
import wandb
from wandb.keras import WandbCallback
import numpy as np
import copy, os, random
from argparse import ArgumentParser

from plotter import plotter
from HNS import HNS

tf.keras.backend.set_floatx('float64')

wandb.init(name=name, project=project)


wandb.env.name                 = "HNS"
wandb.env.world_size           = (8, 8)
wandb.env.state_dim            = (8, 8, 1)
wandb.env.hider_count          = 2
wandb.env.action_dim           = 4
wandb.env.random_spawn         = True
wandb.env.max_steps            = 100 

wandb.config.node              = node

wandb.config.epsilon           = 1.0
wandb.config.epsilon_decay     = 0.99
wandb.config.batch_size        = 32
wandb.config.learning_rate     = 1e-3
wandb.config.gamma             = 0.8
wandb.config.min_epsilon       = 1e-1
wandb.config.buffersize        = 10000
wandb.config.epochs            = 1
wandb.config.reward_discount   = 0.01
wandb.config.episodes          = 1000

wandb.config.conv1_kernel      = (8,8)
wandb.config.conv1_filters     = 16
wandb.config.conv1_strides     = 4
wandb.config.conv1_activation  = "relu"
wandb.config.conv1_padding     = "same"

wandb.config.conv2_kernel      = (4,4)
wandb.config.conv2_filters     = 32
wandb.config.conv2_strides     = 4
wandb.config.conv2_activation  = "relu"
wandb.config.conv2_padding     = "same"

wandb.config.dense1_neurons    = 16
wandb.config.dense1_activation = "relu"

wandb.config.loss              = "mse"

parser = ArgumentParser()
parser.add_argument('--hider_count',     type=int,   default=wandb.env.hider_count)
parser.add_argument('--max_steps',       type=int,   default=wandb.env.max_steps)
parser.add_argument('--epsilon_decay',   type=float, default=wandb.config.epsilon_decay)
parser.add_argument('--min_epsilon',     type=float, default=wandb.config.min_epsilon)
parser.add_argument('--learning_rate',   type=float, default=wandb.config.learning_rate)
parser.add_argument('--gamma',           type=float, default=wandb.config.gamma)
parser.add_argument('--reward_discount', type=float, default=wandb.config.reward_discount)
parser.add_argument('--episodes',        type=int,   default=wandb.config.episodes)
parser.add_argument('--batch_size',      type=int,   default=wandb.config.batch_size)

args, unknown = parser.parse_known_args()
wandb.config.update(args, allow_val_change=True)


class ReplayBuffer:
    def __init__(self):
        self.buffer = deque(maxlen=wandb.config.buffersize)

    def put(self, state, action, reward, next_state, done):
        self.buffer.append([state, action, reward, next_state, done])

    def sample(self):
        sample = random.sample(self.buffer, wandb.config.batch_size)
        states, actions, rewards, next_states, done = map(np.asarray, zip(*sample))
        return states, actions, rewards, next_states, done

    def size(self):
        return len(self.buffer)


class ActionStatemodel:
    def __init__(self):
        self.epsilon = wandb.config.epsilon
        self.model = self.create_model()

    def create_model(self):
        # Init model
        model = tf.keras.Sequential()

        # Set up layers
        model.add(Conv2D(filters=wandb.config.conv1_filters, kernel_size=wandb.config.conv1_kernel, activation=wandb.config.conv1_activation,
                         strides=wandb.config.conv1_strides, padding=wandb.config.conv1_padding, name="conv_1", input_shape=wandb.env.state_dim))
        model.add(Conv2D(filters=wandb.config.conv2_filters, kernel_size=wandb.config.conv2_kernel, activation=wandb.config.conv2_activation,
                         strides=wandb.config.conv2_strides, padding=wandb.config.conv2_padding, name="conv_2"))

        model.add(Flatten())
        model.add(Dense(units=wandb.config.dense1_neurons, activation=wandb.config.dense1_activation,  name="dense_1"))
        model.add(Dense(wandb.env.action_dim, name="dense_2"))

        # Finalize model
        model.compile(loss=wandb.config.loss, optimizer=Adam(wandb.config.learning_rate))
        model.summary()

        return model

    # Get q-values from state
    def predict(self, state):
        return self.model.predict(state)

    # Get epsilon-greedy action from state
    def get_action(self, state):
        # Predict action
        state = np.expand_dims(state, axis=0)
        q_value = self.predict(state)

        if np.random.random() < self.epsilon: return random.randint(0, wandb.env.action_dim - 1), 1
        else: return np.argmax(q_value), 0


    def train(self, states, targets):
        history = self.model.fit(states, targets, epochs=wandb.config.epochs, callbacks=[WandbCallback()], verbose=2, use_multiprocessing=True)
        return history.history["loss"][0]

class Agent:
    def __init__(self, env):
        self.env = env

        self.predict_net = ActionStatemodel()
        self.target_net = ActionStatemodel()
        self.target_update()

        self.buffer = ReplayBuffer()

    # Copy weights from model to target_model
    def target_update(self):
        weights = self.predict_net.model.get_weights()
        self.target_net.model.set_weights(weights)

    def replay(self):
        loss = 0
        for _ in range(5):
            states, actions, rewards, next_states, done = self.buffer.sample()

            # Collect predicted actions from predict_net
            predicted_q_values = self.predict_net.predict(next_states)
            predicted_actions = np.argmax(predicted_q_values, axis=1)

            # Get q values from target_net of above predicted actions
            target_q_values = self.target_net.predict(next_states)
            target_action_q_values = [np.take(target_q_values[i], predicted_actions[i]) for i in range(len(target_q_values))]

            # Create targets based on q values, reward and done
            targets = predicted_q_values.copy()
            targets[range(wandb.config.batch_size), actions] = rewards + (1 - done) * target_action_q_values * args.gamma

            loss += self.predict_net.train(states, targets)
        return loss

    def train(self):
        # Save weights for heatmap rendering

        # Main training loop
        for ep in range(wandb.config.episodes):

            # Initialization
            done, total_reward, step, loss, exploration = False, 0, 0, 0, 0
            state = self.env.reset()
            while not done and step < wandb.env.max_steps:

                # Predict and perform action
                action, e = self.predict_net.get_action(state)
                exploration += e
                next_state, reward, done, _ = self.env.step(action)
                self.buffer.put(state, action, reward * wandb.config.reward_discount, next_state, done)
                total_reward += reward

                if self.buffer.size() >= 1000 and step % 10 == 0: 
                    loss = self.replay()

                state = next_state
                step += 1

            self.target_update()

            # Update epsilon
            self.predict_net.epsilon = max(wandb.config.epsilon_decay * self.predict_net.epsilon, wandb.config.min_epsilon)


            # Calculate weights change and log weights
            pre_weights = self.get_weights(self.predict_net.model.layers)
            tar_weights = self.get_weights(self.target_net.model.layers)

            # LOG
            print(colored("EP" + str(ep) + "-Reward: " + str(total_reward) + " Done: " + str(done), "green"))
            wandb.log({"episode"      : ep,
                       "buffersize"   : self.buffer.size(),
                       "EpReward"     : total_reward,
                       "epsilon"      : self.predict_net.epsilon,
                       "done"         : int(done),
                       "Exploration"  : exploration / _,
                       "loss"         : loss,
                       "pre_weights"  : pre_weights,
                       "tar_weights"  : tar_weights
                       })
                       # "weigthUpdate" : wandb.Image(neuron_map),


    # Get weights and names for every layer of nn model
    def get_weights(self, layers):
        weigths = []
        names = []
        for layer in layers:
            wb = layer.get_weights()
            if wb:
                weigths.append(wb[0].flatten())
                names.append(layer.name)
        return weigths, names


if __name__ == "__main__":
    env = HNS(random_spawn=wandb.env.random_spawn, world_size=wandb.env.world_size, hider_count=wandb.env.hider_count)
    agent = Agent(env=env)
    agent.train()
    agent.target_net.model.save(os.path.join(wandb.run.dir, "model.h5"))

Tags: python, tensorflow, keras, reinforcement-learning, dqn
