
Commit 35f57ad

brianwooridle authored and brian committed
double DQN batch form modified
1 parent a187250 commit 35f57ad

File tree

1 file changed: +2 -5 lines changed


Code 2. Cartpole/1. DQN/Cartpole_DQN.py

Lines changed: 2 additions & 5 deletions
@@ -4,7 +4,6 @@
 import random
 import numpy as np
 from collections import deque
-import tensorflow as tf
 from keras.layers import Dense
 from keras.optimizers import Adam
 from keras.models import Sequential
@@ -18,7 +17,7 @@
 class DQNAgent:
     def __init__(self, state_size, action_size):
         # if you want to see Cartpole learning, then change to True
-        self.render = False
+        self.render = True

         # get size of state and action
         self.state_size = state_size
@@ -102,8 +101,6 @@ def train_replay(self):

         # and do the model fit!
         self.model.fit(update_input, target, batch_size=self.batch_size, epochs=1, verbose=0)
-        #hist = self.model.fit(update_input, update_target, batch_size=batch_size, epochs=1, verbose=0)
-        #self.avg_loss += hist.history['loss'][0]

     # load the saved model
     def load_model(self, name):
@@ -157,7 +154,7 @@ def save_model(self, name):
             score = score if score == 500 else score + 100
             scores.append(score)
             episodes.append(e)
-            pylab.plot(episodes, scores, 'b')
+            #pylab.plot(episodes, scores, 'b')
             # pylab.savefig("./save_graph/Cartpole_DQN.png")
             print("episode:", e, " score:", score, " memory length:", len(agent.memory),
                   " epsilon:", agent.epsilon)
