main.py
import random

import gym
import numpy as np
import tensorflow as tf  # uses the TensorFlow 1.x graph API

# Keep retraining from scratch until the greedy policy averages at least 195
# reward over 100 evaluation episodes (the classic CartPole "solved" bar).
avgReward = 0
while (avgReward / 100) < 195:
    env = gym.make('CartPole-v1')

    # A small network mapping the 4-dimensional observation to 4 outputs,
    # which are later reshaped into a 2x2 "Q-table".
    x = tf.placeholder(tf.float32, shape=[None, 4])
    y = tf.placeholder(tf.float32, shape=[None, 4])
    m1 = tf.Variable(tf.truncated_normal(shape=[4, 16]))
    m2 = tf.Variable(tf.truncated_normal(shape=[16, 8]))
    m3 = tf.Variable(tf.truncated_normal(shape=[8, 4]))
    mx_b_ie_Qtable = tf.matmul(x, m1)
    mx_b_ie_Qtable = tf.matmul(mx_b_ie_Qtable, m2)
    mx_b_ie_Qtable = tf.matmul(mx_b_ie_Qtable, m3)

    loss = tf.reduce_mean(tf.square(mx_b_ie_Qtable - y))
    trainingStep = tf.train.AdamOptimizer(0.01).minimize(loss)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
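    # Note: there are no biases or activation functions between the matmuls
    # above, so by associativity the 4->16->8->4 stack reduces to a single
    # 4x4 linear map (x @ m1 @ m2 @ m3 == x @ (m1 @ m2 @ m3)); a nonlinearity
    # such as tf.nn.relu between layers would be needed for the extra layers
    # to add capacity. The network is kept exactly as written.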
    # Training: 2700 episodes of random exploration with one-step Q updates.
    state = 0
    failed = False
    wlf = 0  # last observed training loss, reported at the end
    for i in range(2700):
        feature = env.reset()
        failed = False  # reset the done flag at the start of every episode
        while not failed:
            featureNP = np.reshape(np.array(feature), [-1, 4])
            # state = random.choice(range(2))
            action = random.choice(range(2))  # explore with a random action
            featureForNext, reward, failed, _ = env.step(action)
            # Interpret the 4 network outputs as a 2x2 table indexed by
            # (previous action, current action) and apply a one-step Bellman
            # update with discount factor 0.97.
            Qtabel = sess.run(mx_b_ie_Qtable, feed_dict={x: featureNP})
            Qtabel = np.reshape(np.array(Qtabel), [2, 2])
            Qtabel[state, action] = reward + 0.97 * np.max(Qtabel[action])
            labelNP = np.reshape(np.array(Qtabel), [-1, 4])
            # Fit the network toward the updated table for a few steps.
            for j in range(10):
                sess.run([trainingStep, loss],
                         feed_dict={x: featureNP, y: labelNP})
            wlf = sess.run(loss, feed_dict={x: featureNP, y: labelNP})
            # env.render()
            feature = featureForNext
            state = action
    # Evaluation: 100 greedy episodes; track the best and the average return.
    highestReward = 0
    avgReward = 0
    for i in range(100):
        totalReward = 0
        feature = env.reset()
        failed = False
        state = 0
        while not failed:
            featureNP = np.reshape(np.array(feature), [-1, 4])
            Qtabel = sess.run(mx_b_ie_Qtable, feed_dict={x: featureNP})
            Qtabel = np.reshape(np.array(Qtabel), [2, 2])
            # Act greedily with respect to the learned table.
            action = np.argmax(Qtabel[state])
            # print(action, "**")
            featureForNext, reward, failed, _ = env.step(action)
            feature = featureForNext
            # env.render()
            totalReward = reward + totalReward
            # print(totalReward)
            state = action
        # Accumulate after the episode ends so every episode is counted.
        highestReward = max(highestReward, totalReward)
        avgReward = avgReward + totalReward
    print(highestReward, avgReward / 100, wlf)
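
# Compatibility note: this script targets the legacy TensorFlow 1.x graph API
# (tf.placeholder, tf.Session, tf.train.AdamOptimizer) and the classic gym
# step signature. A minimal sketch of the same calls under TF 2.x compat mode
# and the Gymnasium API (an assumption about the reader's setup, not part of
# the original script):
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()        # re-enables the graph/Session style
#   import gymnasium as gym
#   env = gym.make('CartPole-v1')
#   feature, _info = env.reset()        # reset() now also returns an info dict
#   featureForNext, reward, terminated, truncated, _info = env.step(action)
#   failed = terminated or truncated    # step() splits "done" into two flags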