Commit

Merge pull request rlcode#68 from jcwleo/master
Add DQN with PER
jcwleo authored Nov 28, 2017
2 parents a497d71 + 560e8dd commit 2fe6984
Showing 2 changed files with 279 additions and 0 deletions.
55 changes: 55 additions & 0 deletions 2-cartpole/1-dqn/SumTree.py
@@ -0,0 +1,55 @@
import numpy


class SumTree:
    write = 0

    def __init__(self, capacity):
        self.capacity = capacity
        # binary tree stored as a flat array: internal nodes hold priority sums,
        # the last `capacity` entries are the leaf priorities
        self.tree = numpy.zeros(2 * capacity - 1)
        self.data = numpy.zeros(capacity, dtype=object)

    # propagate a priority change up to the root
    def _propagate(self, idx, change):
        parent = (idx - 1) // 2

        self.tree[parent] += change

        if parent != 0:
            self._propagate(parent, change)

    # find the leaf whose cumulative priority range contains s
    def _retrieve(self, idx, s):
        left = 2 * idx + 1
        right = left + 1

        if left >= len(self.tree):
            return idx

        if s <= self.tree[left]:
            return self._retrieve(left, s)
        else:
            return self._retrieve(right, s - self.tree[left])

    # total priority mass (value of the root node)
    def total(self):
        return self.tree[0]

    # store a sample with priority p, overwriting the oldest entry when full
    def add(self, p, data):
        idx = self.write + self.capacity - 1

        self.data[self.write] = data
        self.update(idx, p)

        self.write += 1
        if self.write >= self.capacity:
            self.write = 0

    # set the priority of leaf idx and propagate the change upward
    def update(self, idx, p):
        change = p - self.tree[idx]

        self.tree[idx] = p
        self._propagate(idx, change)

    # return the leaf index, priority, and stored data for cumulative priority s
    def get(self, s):
        idx = self._retrieve(0, s)
        dataIdx = idx - self.capacity + 1

        return (idx, self.tree[idx], self.data[dataIdx])
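The SumTree keeps each transition's priority in a leaf and partial sums in the internal nodes, so the leaf covering a uniformly drawn cumulative value can be found in O(log n). A minimal usage sketch of the class above; the capacity of 4, the priority values, and the "t1"..."t4" payloads are made up for illustration:

    import random
    from SumTree import SumTree

    # hypothetical priorities and payloads, just to exercise the API
    tree = SumTree(capacity=4)
    for priority, transition in [(1.0, "t1"), (0.5, "t2"), (2.0, "t3"), (0.1, "t4")]:
        tree.add(priority, transition)

    # draw a value uniformly over the total priority mass and fetch the matching leaf;
    # leaves with larger priority cover a larger slice and are returned more often
    s = random.uniform(0, tree.total())
    idx, p, transition = tree.get(s)
    print(idx, p, transition)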
224 changes: 224 additions & 0 deletions 2-cartpole/1-dqn/cartpole_only_per.py
@@ -0,0 +1,224 @@
import sys
import gym
import pylab
import random
import numpy as np
from SumTree import SumTree
from collections import deque
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential

EPISODES = 300


# DQN agent for the CartPole example
class DQNAgent:
    def __init__(self, state_size, action_size):
        self.render = False
        self.load_model = False

        # sizes of the state and action spaces
        self.state_size = state_size
        self.action_size = action_size

        # DQN hyperparameters
        self.discount_factor = 0.99
        self.learning_rate = 0.001
        self.epsilon = 1.0
        self.epsilon_decay = 0.999
        self.epsilon_min = 0.01
        self.batch_size = 64
        self.train_start = 2000
        self.memory_size = 2000

        # prioritized replay memory, maximum size 2000
        self.memory = Memory(self.memory_size)

        # create the model and the target model
        self.model = self.build_model()
        self.target_model = self.build_model()

        # initialize the target model
        self.update_target_model()

        if self.load_model:
            self.model.load_weights("./save_model/cartpole_dqn_trained.h5")

    # neural network with the state as input and the Q-function as output
    def build_model(self):
        model = Sequential()
        model.add(Dense(24, input_dim=self.state_size, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(24, activation='relu',
                        kernel_initializer='he_uniform'))
        model.add(Dense(self.action_size, activation='linear',
                        kernel_initializer='he_uniform'))
        model.summary()
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model

    # update the target model with the model's weights
    def update_target_model(self):
        self.target_model.set_weights(self.model.get_weights())

    # select an action with an epsilon-greedy policy
    def get_action(self, state):
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        else:
            q_value = self.model.predict(state)
            return np.argmax(q_value[0])

    # store the sample <s, a, r, s'> in the replay memory
    def append_sample(self, state, action, reward, next_state, done):
        if self.epsilon == 1:
            done = True

        # compute the TD-error and store it in memory together with the sample
        target = self.model.predict([state])
        old_val = target[0][action]
        target_val = self.target_model.predict([next_state])
        if done:
            target[0][action] = reward
        else:
            target[0][action] = reward + self.discount_factor * (
                np.amax(target_val[0]))
        error = abs(old_val - target[0][action])

        self.memory.add(error, (state, action, reward, next_state, done))

    # train the model on a batch drawn from the prioritized replay memory
    def train_model(self):
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        # sample batch_size transitions from memory in proportion to priority
        mini_batch = self.memory.sample(self.batch_size)

        errors = np.zeros(self.batch_size)
        states = np.zeros((self.batch_size, self.state_size))
        next_states = np.zeros((self.batch_size, self.state_size))
        actions, rewards, dones = [], [], []

        for i in range(self.batch_size):
            states[i] = mini_batch[i][1][0]
            actions.append(mini_batch[i][1][1])
            rewards.append(mini_batch[i][1][2])
            next_states[i] = mini_batch[i][1][3]
            dones.append(mini_batch[i][1][4])

        # Q-function of the model for the current states
        # Q-function of the target model for the next states
        target = self.model.predict(states)
        target_val = self.target_model.predict(next_states)

        # update targets from the Bellman optimality equation
        for i in range(self.batch_size):
            old_val = target[i][actions[i]]
            if dones[i]:
                target[i][actions[i]] = rewards[i]
            else:
                target[i][actions[i]] = rewards[i] + self.discount_factor * (
                    np.amax(target_val[i]))
            # store the TD-error
            errors[i] = abs(old_val - target[i][actions[i]])

        # update priorities with the new TD-errors
        for i in range(self.batch_size):
            idx = mini_batch[i][0]
            self.memory.update(idx, errors[i])

        self.model.fit(states, target, batch_size=self.batch_size,
                       epochs=1, verbose=0)


class Memory:  # stored as ( s, a, r, s_ ) in SumTree
    e = 0.01
    a = 0.6

    def __init__(self, capacity):
        self.tree = SumTree(capacity)

    def _getPriority(self, error):
        return (error + self.e) ** self.a

    def add(self, error, sample):
        p = self._getPriority(error)
        self.tree.add(p, sample)

    def sample(self, n):
        batch = []
        segment = self.tree.total() / n

        for i in range(n):
            a = segment * i
            b = segment * (i + 1)

            s = random.uniform(a, b)
            (idx, p, data) = self.tree.get(s)
            batch.append((idx, data))

        return batch

    def update(self, idx, error):
        p = self._getPriority(error)
        self.tree.update(idx, p)


if __name__ == "__main__":
    # CartPole-v1 environment, maximum of 500 timesteps
    env = gym.make('CartPole-v1')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n

    # create the DQN agent
    agent = DQNAgent(state_size, action_size)

    scores, episodes = [], []

    step = 0
    for e in range(EPISODES):
        done = False
        score = 0
        # reset the environment
        state = env.reset()
        state = np.reshape(state, [1, state_size])

        while not done:
            if agent.render:
                env.render()
            step += 1
            # select an action for the current state
            action = agent.get_action(state)
            # advance one timestep in the environment with the chosen action
            next_state, reward, done, info = env.step(action)
            next_state = np.reshape(next_state, [1, state_size])
            # give a -10 penalty if the episode ends before the 500-step limit
            r = reward if not done or score+reward == 500 else -10
            # store the sample <s, a, r, s'> in the replay memory
            agent.append_sample(state, action, r, next_state, done)
            # train every timestep once enough samples have been collected
            if step >= agent.train_start:
                agent.train_model()

            score += reward
            state = next_state

            if done:
                # update the target model with the model's weights after each episode
                agent.update_target_model()

                # score = score if score == 500 else score + 100
                # log the training result for each episode
                scores.append(score)
                episodes.append(e)
                pylab.plot(episodes, scores, 'b')
                pylab.savefig("./save_graph/cartpole_dqn.png")
                print("episode:", e, " score:", score, " memory length:",
                      step if step <= agent.memory_size else agent.memory_size,
                      " epsilon:", agent.epsilon)

                # stop training if the mean score of the last 10 episodes exceeds 490
                if np.mean(scores[-min(10, len(scores)):]) > 490:
                    agent.model.save_weights("./save_model/cartpole_dqn.h5")
                    sys.exit()
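The Memory class maps each TD-error to a priority with p = (|error| + e) ** a (e = 0.01, a = 0.6 above) and samples by splitting the total priority mass into batch-size segments, drawing one cumulative value per segment. A standalone sketch of that mapping under those constants; the TD-error values and the helper names priority/sample_points are illustrative, not part of the diff:

    import random

    # priority mapping used by Memory._getPriority: p = (|td_error| + e) ** a
    E, A = 0.01, 0.6

    def priority(td_error):
        return (abs(td_error) + E) ** A

    # stratified sampling as in Memory.sample: split the total priority mass into
    # n equal segments and draw one cumulative value per segment, so the whole
    # range is covered while high-priority leaves still cover larger slices
    def sample_points(total_priority, n):
        segment = total_priority / n
        return [random.uniform(segment * i, segment * (i + 1)) for i in range(n)]

    # larger errors get larger priorities, but the exponent a < 1 compresses them
    print([round(priority(err), 3) for err in [0.0, 0.1, 1.0, 5.0]])
    print(sample_points(10.0, 4))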
