diff --git a/Master.lua b/Master.lua
index a8b07f5..7186228 100644
--- a/Master.lua
+++ b/Master.lua
@@ -78,6 +78,7 @@ function Master:train()
   self:catchSigInt()
 
   local reward, state, terminal = 0, self.env:start(), false
+  local nextAction
 
   -- Set environment and agent to training mode
   self.env:training()
@@ -93,11 +94,17 @@ function Master:train()
   for step = initStep, self.opt.steps do
     self.globals.step = step -- Pass step number to globals for use in other modules
 
-    -- Observe results of previous transition (r, s', terminal') and choose next action (index)
-    local action = self.agent:observe(reward, state, terminal) -- As results received, learn in training mode
+    local action
+    if nextAction then
+      -- Allow environment to control next action
+      action = nextAction
+    else
+      -- Observe results of previous transition (r, s', terminal') and choose next action (index)
+      action = self.agent:observe(reward, state, terminal) -- As results received, learn in training mode
+    end
     if not terminal then
       -- Act on environment (to cause transition)
-      reward, state, terminal = self.env:step(action)
+      reward, state, terminal, nextAction = self.env:step(action)
       -- Track score
       episodeScore = episodeScore + reward
     else
diff --git a/README.md b/README.md
index a1080a0..f0b971f 100644
--- a/README.md
+++ b/README.md
@@ -54,6 +54,8 @@ You can use a custom environment (as the path to a Lua file/`rlenvs`-namespaced
 
 If the environment has separate behaviour during training and testing it should also implement `training` and `evaluate` methods - otherwise these will be added as empty methods during runtime. The environment can also implement a `getDisplay` method (with a mandatory `getDisplaySpec` method for determining screen size) which will be used for displaying the screen/computing saliency maps, where `getDisplay` must return a RGB (3D) tensor; this can also be utilised even if the state is not an image (although saliency can only be computed for states that are images). This **must** be implemented to have a visual display/computing saliency maps. The `-zoom` factor can be used to increase the size of small displays.
 
+Custom environments can also control the action selection process, bypassing the DQN forward pass and instead specifying the next action to be executed from within the environment itself. This allows the agent to learn from hand-crafted behaviours, human experts or pre-planned sequences, with or without integrating action selections from the network. To achieve this, environments can optionally return `nextAction` from the `step` method, i.e. `return reward, state, terminal, nextAction`.
+
 You can also use a custom model (body) with `-modelBody`, which replaces the usual DQN convolutional layers with a custom Torch neural network (as the path to a Lua file/`models`-namespaced environment). The class must include a `createBody` method which returns the custom neural network. The model will receive a stack of the previous states (as determined by `-histLen`), and must reshape them manually if needed. The DQN "heads" will then be constructed as normal, with `-hiddenSize` used to change the size of the fully connected layer if needed.
 
 For an example on a GridWorld environment, run `./run.sh demo-grid` - the demo also works with `qlua` and experience replay agents. The custom environment and network can be found in the [examples](https://github.com/Kaixhin/Atari/tree/master/examples) folder.
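For reference, here is a minimal sketch (not part of the diff) of how a custom environment might supply the optional fourth return value described in the README change. The class name, the `applyAction` helper, and the `scriptedActions`/`stepCount` fields are purely illustrative; the rest of the environment interface is whatever the environment already implements.

```lua
-- Hypothetical environment that scripts its first few actions and then hands
-- control back to the agent.
local GuidedEnv = {}
GuidedEnv.__index = GuidedEnv

function GuidedEnv:step(action)
  -- Existing transition logic (assumed): apply the action, compute reward/state/terminal
  local reward, state, terminal = self:applyAction(action)

  -- Optionally dictate the action for the *next* call to step; returning nil
  -- lets the agent fall back to its own policy (ε-greedy, softmax, etc.)
  self.stepCount = self.stepCount + 1
  local nextAction = self.scriptedActions[self.stepCount + 1] -- nil once the script runs out

  return reward, state, terminal, nextAction
end
```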
diff --git a/async/A3CAgent.lua b/async/A3CAgent.lua
index c507efe..2953e8f 100644
--- a/async/A3CAgent.lua
+++ b/async/A3CAgent.lua
@@ -39,6 +39,7 @@ function A3CAgent:learn(steps, from)
   log.info('A3CAgent starting | steps=%d', steps)
 
   local reward, terminal, state = self:start()
+  local nextAction
 
   self.states:resize(self.batchSize, table.unpack(state:size():totable()))
 
@@ -50,12 +51,18 @@
     self.batchIdx = self.batchIdx + 1
     self.states[self.batchIdx]:copy(state)
 
-    local V, probability = table.unpack(self.policyNet_:forward(state))
-    local action = torch.multinomial(probability, 1):squeeze()
+    local action
+    if nextAction then
+      -- Allow environment to control next action
+      action = nextAction + self.actionOffset
+    else
+      local V, probability = table.unpack(self.policyNet_:forward(state))
+      action = torch.multinomial(probability, 1):squeeze()
+    end
 
     self.actions[self.batchIdx] = action
 
-    reward, terminal, state = self:takeAction(action)
+    reward, terminal, state, nextAction = self:takeAction(action)
     self.rewards[self.batchIdx] = reward
 
     self:progress(steps)
@@ -98,7 +105,7 @@
     local gradEntropy = torch.log(probability) + 1
     -- Add to target to improve exploration (prevent convergence to suboptimal deterministic policy)
     self.policyTarget:add(self.beta, gradEntropy)
-
+
     self.policyNet_:backward(self.states[i], self.targets)
   end
 end
diff --git a/async/AsyncAgent.lua b/async/AsyncAgent.lua
index 39ddb71..419357d 100644
--- a/async/AsyncAgent.lua
+++ b/async/AsyncAgent.lua
@@ -78,7 +78,7 @@ end
 
 
 function AsyncAgent:takeAction(action)
-  local reward, rawObservation, terminal = self.env:step(action - self.actionOffset)
+  local reward, rawObservation, terminal, nextAction = self.env:step(action - self.actionOffset)
   if self.rewardClip > 0 then
     reward = math.max(reward, -self.rewardClip)
     reward = math.min(reward, self.rewardClip)
@@ -91,7 +91,7 @@ function AsyncAgent:takeAction(action)
     self.stateBuffer:push(observation)
   end
 
-  return reward, terminal, self.stateBuffer:readAll()
+  return reward, terminal, self.stateBuffer:readAll(), nextAction
 end
 
 
diff --git a/async/NStepQAgent.lua b/async/NStepQAgent.lua
index 520de07..156c7f4 100644
--- a/async/NStepQAgent.lua
+++ b/async/NStepQAgent.lua
@@ -31,6 +31,7 @@ function NStepQAgent:learn(steps, from)
   log.info('NStepQAgent starting | steps=%d | ε=%.2f -> %.2f', steps, self.epsilon, self.epsilonEnd)
 
   local reward, terminal, state = self:start()
+  local nextAction
 
   self.states:resize(self.batchSize, table.unpack(state:size():totable()))
   self.tic = torch.tic()
@@ -41,10 +42,16 @@
     self.batchIdx = self.batchIdx + 1
     self.states[self.batchIdx]:copy(state)
 
-    local action = self:eGreedy(state, self.policyNet_)
+    local action
+    if nextAction then
+      -- Allow environment to control next action
+      action = nextAction + self.actionOffset
+    else
+      action = self:eGreedy(state, self.policyNet_)
+    end
     self.actions[self.batchIdx] = action
 
-    reward, terminal, state = self:takeAction(action)
+    reward, terminal, state, nextAction = self:takeAction(action)
     self.rewards[self.batchIdx] = reward
 
     self:progress(steps)
diff --git a/async/OneStepQAgent.lua b/async/OneStepQAgent.lua
index 4a75416..05220ef 100644
--- a/async/OneStepQAgent.lua
+++ b/async/OneStepQAgent.lua
@@ -24,13 +24,18 @@ function OneStepQAgent:learn(steps, from)
   log.info('%s starting | steps=%d | ε=%.2f -> %.2f', self.agentName, steps, self.epsilon, self.epsilonEnd)
 
   local reward, terminal, state = self:start()
-  local action, state_
+  local action, state_, nextAction
 
   self.tic = torch.tic()
   for step1=1,steps do
     if not terminal then
-      action = self:eGreedy(state, self.policyNet)
-      reward, terminal, state_ = self:takeAction(action)
+      if nextAction then
+        -- Allow environment to control next action
+        action = nextAction + self.actionOffset
+      else
+        action = self:eGreedy(state, self.policyNet)
+      end
+      reward, terminal, state_, nextAction = self:takeAction(action)
     else
       reward, terminal, state_ = self:start()
     end
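One detail the async changes above imply: the agents convert the returned value with `self.actionOffset` (mirroring the `action - self.actionOffset` passed into `env:step`), so `nextAction` should be expressed in the environment's own action indexing. A rough sketch of an environment that only overrides the agent intermittently, assuming hypothetical `guideProb`, `expertPolicy` and `applyAction` members:

```lua
-- Illustrative only: mix expert guidance with the learned policy by returning
-- nextAction some of the time and nil otherwise (nil means the agent chooses).
function GuidedEnv:step(action)
  local reward, state, terminal = self:applyAction(action) -- assumed existing logic

  local nextAction = nil
  if torch.uniform() < self.guideProb then
    nextAction = self:expertPolicy(state) -- hand-crafted behaviour, in the env's action indexing
  end

  return reward, state, terminal, nextAction
end
```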