DQN Q-values exploding

Problem description

I am trying to follow the PyTorch tutorial on Q-learning. I kept the core of it but tried to implement it in a different way. However, I have run into a problem: at best the algorithm does not seem to solve the environment, and at worst the predicted Q-values explode. I cannot find the root cause.

I have tried tuning most of the hyperparameters and lowering the learning rate, and I have gone over the code several times to check for differences between my code and the one on the PyTorch website.

Does anyone know what could cause this behaviour, or see a mistake in the code? I have checked most of the related posts on Stack Overflow, and most of them are about the learning rate or the target network. My code does use a target network, and I have tried lowering the learning rate, but unfortunately that does not seem to fix the problem.
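For reference, the update I am trying to implement in learn() is the standard one-step DQN target, y = r + GAMMA * max_a Q_target(s', a), where the bootstrap term is zeroed for terminal transitions. A minimal sketch of just that computation (the helper name and tensor shapes here are only illustrative, not part of my actual code):

import torch

def td_target(rewards,next_q_target,non_terminal_mask,gamma=0.99):
    # rewards: (batch,) float tensor
    # next_q_target: (num_non_terminal,n_actions) output of the target network for non-terminal next states
    # non_terminal_mask: (batch,) bool tensor, True where next_state is not terminal
    next_values = torch.zeros_like(rewards)
    next_values[non_terminal_mask] = next_q_target.max(1)[0].detach()
    return rewards + gamma * next_values

My full code is below: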

import gym
import torch
import random
import numpy as np
from collections import deque
from copy import deepcopy
import torch.nn as nn
import torch.optim as optim
from itertools import count
import torch.nn.functional as F
from collections import namedtuple

GAMMA = 0.99
EPSILON_END = 0.05
EPSILON_START = 0.9
EPSILON_DECAY = 500
LEARNING_RATE = 0.0001

ACTION_SPACE = 4
OBSERVATION_SPACE = 8

BATCH_SIZE = 32
MEMORY_CAPACITY = 10000
TARGET_NETWORK_UPDATE_FREQUENCY = 2

Experience = namedtuple('Experience',['state','action','reward','next_state'])
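# A single replay transition; next_state is stored as None when the episode has terminated.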


class Network(nn.Module):

    def __init__(self,input_features,output_features):
        super().__init__()
        self.fc_1 = nn.Linear(input_features,100)
        self.bn_1 = nn.LayerNorm(100)
        self.fc_2 = nn.Linear(100,100)
        self.bn_2 = nn.LayerNorm(100)
        self.fc_3 = nn.Linear(100,output_features)

    def forward(self,x):
        x = F.relu(self.bn_1(self.fc_1(x)))
        x = F.relu(self.bn_2(self.fc_2(x)))
        return self.fc_3(x)


class Memory(object):

    def __init__(self,capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0

    def push(self,experience):
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = experience
        self.position = (self.position + 1) % self.capacity

    def sample(self,batch_size):
        return random.sample(self.memory,batch_size)

    def __len__(self):
        return len(self.memory)


class Agent:

    def __init__(self,policy_network,action_space):
        self.policy_network = policy_network
        self.actions = list(range(action_space))

    def __call__(self,observation,step_id=None):
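        # Epsilon-greedy: exploit the policy network's Q-values with probability 1 - epsilon, otherwise act randomly.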
        if np.random.random() > get_epsilon(step_id):
            with torch.no_grad():
                predictions = self.policy_network(torch.tensor(observation)).detach().numpy()
                return np.argmax(predictions)
        else:
            return np.random.choice(self.actions)


class QLearning:

    def __init__(self,agent: Agent,target_network: nn.Module,optimizer,memory_capacity,batch_size):
        self.step = 0
        self.agent = agent
        self.target_network = target_network
        self.optimizer = optimizer
        self.batch_size = batch_size
        self.memory = Memory(memory_capacity)

    def train(self,num_episodes):
        env = gym.make('LunarLander-v2')
        recent_scores = deque(maxlen=100)
        for episode_id in range(num_episodes):
            observation = env.reset()
            score = 0
            for t in count():
                action = self.agent(observation,self.step)
                observation_,reward,done,info = env.step(action.item())

                observation_ = None if done else observation_
                experience = self.preprocess_experience(observation,action,reward,observation_)
                self.memory.push(experience)

                q_values = self.learn()
                self.step += 1
                score += reward

                if done:
                    break

            recent_scores.append(score)

            if episode_id % TARGET_NETWORK_UPDATE_FREQUENCY == 0:
                self.update_target_network()

            if episode_id % 10 == 0:
                print(episode_id,np.mean(recent_scores),t)  # Todo Tensorboard
                print('sample of the state_action_values :',list(q_values))

    def learn(self):
        if len(self.memory) < self.batch_size:
            return
        experiences = self.memory.sample(self.batch_size)
        batch = Experience(*zip(*experiences))

        states = torch.cat(batch.state)
        actions = torch.cat(batch.action)
        rewards = torch.cat(batch.reward)
        state_action_values = self.agent.policy_network(states).gather(1,actions)

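        # Bellman target: r + GAMMA * max_a Q_target(s', a), with the bootstrap term zeroed for terminal next states.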
        non_terminal_mask = torch.tensor(tuple(map(lambda s: s is not None,batch.next_state)),dtype=torch.bool)
        non_terminal_next_states = torch.cat([s for s in batch.next_state if s is not None])

        next_state_values = torch.zeros(BATCH_SIZE)
        next_state_values[non_terminal_mask] = self.target_network(non_terminal_next_states).max(1)[0].detach()

        expected_state_action_values = (next_state_values * GAMMA) + rewards
        expected_state_action_values = expected_state_action_values.unsqueeze(1)

        loss = F.smooth_l1_loss(state_action_values,expected_state_action_values)
        self.optimizer.zero_grad()
        loss.backward()

        for param in self.agent.policy_network.parameters():
            param.grad.data.clamp_(-1,1)
        self.optimizer.step()

        return state_action_values.detach().tolist()[:3] # This is just to monitor if there is an issue with the values predicted

    def update_target_network(self):
        self.target_network.load_state_dict(self.agent.policy_network.state_dict())

    @staticmethod
    def preprocess_experience(state,action,reward,next_state):
        state = torch.tensor(state).unsqueeze(0)
        next_state = torch.tensor(next_state).unsqueeze(0) if next_state is not None else None
        action = torch.tensor(action,dtype=torch.int64).view(1,1)
        reward = torch.tensor(reward,dtype=torch.float32).view(1)
        return Experience(state,action,reward,next_state)


def get_networks_and_optimizer(learning_rate,observation_space,action_space):
    policy_network = Network(observation_space,action_space)
    target_network = deepcopy(policy_network)
    target_network.eval()
    policy_network_optimizer = optim.RMSprop(policy_network.parameters(),learning_rate)
    return policy_network,target_network,policy_network_optimizer


def get_epsilon(step):
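    # Exponential decay from EPSILON_START to EPSILON_END; step=None (evaluation) falls back to EPSILON_END.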
    if step is None:
        return EPSILON_END
    return EPSILON_END + (EPSILON_START - EPSILON_END) * np.exp(-1. * step / EPSILON_DECAY)


if __name__ == '__main__':
    policy_network,target_network,policy_network_optimizer = get_networks_and_optimizer(LEARNING_RATE,OBSERVATION_SPACE,ACTION_SPACE)
    agent = Agent(policy_network,ACTION_SPACE)
    trainer = QLearning(agent,target_network,policy_network_optimizer,MEMORY_CAPACITY,BATCH_SIZE)
    trainer.train(10000)

Solution

No working solution to this problem has been found yet.
