forked from hunkim/ReinforcementZeroToAll
05_q_table_frozenlake.py
# https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0#.pjz9g59ap
import gym
from gym import wrappers
import numpy as np
import matplotlib.pyplot as plt
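# Note: this script targets the legacy gym API, where env.reset() returns the
# state and env.step() returns a 4-tuple; gym.wrappers.Monitor was removed in
# later releases, so pin an old gym version (or port to gymnasium) to run it.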
env = gym.make('FrozenLake-v0')
env = wrappers.Monitor(env, "gym-results")
action_space_n = 4
state_space_n = 16
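# These hard-coded sizes match env.action_space.n and env.observation_space.n
# for the default 4x4 FrozenLake map (16 states, 4 actions).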
# Initialize table with all zeros
Q = np.zeros([state_space_n, action_space_n])
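# Q has one row per state and one column per action; Q[s, a] estimates the
# expected discounted return of taking action a in state s.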
# Set learning parameters
learning_rate = .85  # alpha: how far each update moves Q toward the new target
dis = .99            # gamma: discount factor applied to future rewards
num_episodes = 2000
# Create a list to contain the total reward per episode
rList = []
for i in range(num_episodes):
    # Reset environment and get first new observation
    state = env.reset()
    rAll = 0
    done = False

    # The Q-Table learning algorithm
    while not done:
        # Choose an action by greedily (with noise) picking from the Q table.
        # The Gaussian noise shrinks as 1 / (i + 1), so exploration decays
        # over the course of training.
        action = np.argmax(Q[state, :] + np.random.randn(1, action_space_n) / (i + 1))

        # Get new state and reward from environment
        new_state, reward, done, _ = env.step(action)

        # Update the Q-Table with new knowledge using the learning rate:
        # Q(s, a) <- (1 - lr) * Q(s, a) + lr * (r + dis * max_a' Q(s', a'))
        Q[state, action] = (1 - learning_rate) * Q[state, action] \
            + learning_rate * (reward + dis * np.max(Q[new_state, :]))

        rAll += reward
        state = new_state

    rList.append(rAll)
env.close()
# The OpenAI Gym evaluation scoreboard has been shut down, so gym.upload no
# longer works; the Monitor results remain in the local "gym-results" folder.
# gym.upload("gym-results", api_key="<YOUR_API_KEY>")
print("Score over time: " + str(sum(rList) / num_episodes))
print("Final Q-Table Values")
print(Q)
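# Plot the reward earned in each episode (1 = reached the goal, 0 = failed)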
plt.bar(range(len(rList)), rList, color="blue")
plt.show()