
Commit e557e62

zsdonghao authored and DEKHTIARJonathan committed
fix example docs style (#517)
1 parent 98291f0 commit e557e62

26 files changed: +379 / -934 lines

example/tutorial_atari_pong.py

Lines changed: 2 additions & 6 deletions
@@ -133,11 +133,7 @@ def prepro(I):
         prev_x = None

     if reward != 0:
-        print(
-            (
-                'episode %d: game %d took %.5fs, reward: %f' %
-                (episode_number, game_number, time.time() - start_time, reward)
-            ), ('' if reward == -1 else ' !!!!!!!!')
-        )
+        print(('episode %d: game %d took %.5fs, reward: %f' % (episode_number, game_number, \
+            time.time() - start_time, reward)), ('' if reward == -1 else ' !!!!!!!!'))
         start_time = time.time()
         game_number += 1
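For reference, the reformatted log call can be exercised on its own. A small standalone snippet with made-up values (episode_number, game_number, start_time and reward mirror the tutorial's variables; the numbers are hypothetical):

    import time

    # Hypothetical values, only to show the log format produced by the line above.
    episode_number, game_number, reward = 21, 3, 1.0
    start_time = time.time() - 0.437

    print(('episode %d: game %d took %.5fs, reward: %f' % (episode_number, game_number, \
        time.time() - start_time, reward)), ('' if reward == -1 else ' !!!!!!!!'))
    # prints something like: episode 21: game 3 took 0.43700s, reward: 1.000000  !!!!!!!!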

example/tutorial_binarynet_cifar10_tfrecord.py

Lines changed: 16 additions & 76 deletions
@@ -148,86 +148,31 @@ def read_and_decode(filename, is_train=None):
 # prepare data in cpu
 x_train_, y_train_ = read_and_decode("train.cifar10", True)
 x_test_, y_test_ = read_and_decode("test.cifar10", False)
-
-x_train_batch, y_train_batch = tf.train.shuffle_batch(
-    [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
-) # set the number of threads here
+# set the number of threads here
+x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \
+    batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)
 # for testing, uses batch instead of shuffle_batch
-x_test_batch, y_test_batch = tf.train.batch(
-    [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
-)
+x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \
+    batch_size=batch_size, capacity=50000, num_threads=32)

 def model(x_crop, y_, reuse):
     """ For more simplified CNN APIs, check tensorlayer.org """
-    W_init = tf.truncated_normal_initializer(stddev=5e-2)
-    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
-    b_init2 = tf.constant_initializer(value=0.1)
     with tf.variable_scope("model", reuse=reuse):
         net = tl.layers.InputLayer(x_crop, name='input')
-        net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
+        net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')
         net = tl.layers.SignLayer(net)
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-        net = tl.layers.LocalResponseNormLayer(
-            net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1'
-        )
-        net = tl.layers.BinaryConv2d(
-            net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2'
-        )
-        net = tl.layers.LocalResponseNormLayer(
-            net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2'
-        )
+        net = tl.layers.LocalResponseNormLayer(net, 4, 1.0, 0.001 / 9.0, 0.75, name='norm1')
+        net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')
+        net = tl.layers.LocalResponseNormLayer(net, 4, 1.0, 0.001 / 9.0, 0.75, name='norm2')
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
-        net = tl.layers.FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
+        net = tl.layers.FlattenLayer(net, name='flatten')
         net = tl.layers.SignLayer(net)
-        net = tl.layers.BinaryDenseLayer(
-            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
-        ) # output: (batch_size, 384)
+        net = tl.layers.BinaryDenseLayer(net, 384, act=tf.nn.relu, name='d1relu')
         net = tl.layers.SignLayer(net)
-        net = tl.layers.BinaryDenseLayer(
-            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
-        ) # output: (batch_size, 192)
-        net = tl.layers.DenseLayer(
-            net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
-        ) # output: (batch_size, 10)
-        y = net.outputs
-
-        ce = tl.cost.cross_entropy(y, y_, name='cost')
-        # L2 for the MLP, without this, the accuracy will be reduced by 15%.
-        L2 = 0
-        for p in tl.layers.get_variables_with_name('relu/W', True, True):
-            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
-        cost = ce + L2
+        net = tl.layers.BinaryDenseLayer(net, 192, act=tf.nn.relu, name='d2relu')
+        net = tl.layers.DenseLayer(net, 10, act=tf.identity, name='output')

-        # correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)
-        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
-        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-
-        return net, cost, acc
-
-def model_batch_norm(x_crop, y_, reuse, is_train):
-    """ Batch normalization should be placed before rectifier. """
-    W_init = tf.truncated_normal_initializer(stddev=5e-2)
-    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
-    b_init2 = tf.constant_initializer(value=0.1)
-    with tf.variable_scope("model", reuse=reuse):
-        net = InputLayer(x_crop, name='input')
-
-        net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')
-        net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch1')
-        net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-        net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')
-        net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch2')
-        net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
-        net = tl.layers.FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
-        net = tl.layers.DenseLayer(
-            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
-        ) # output: (batch_size, 384)
-        net = tl.layers.DenseLayer(
-            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
-        ) # output: (batch_size, 192)
-        net = tl.layers.DenseLayer(
-            net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
-        ) # output: (batch_size, 10)
         y = net.outputs

         ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -237,6 +182,7 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
             L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
         cost = ce + L2

+        # correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)
         correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
         acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

@@ -249,12 +195,8 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
 # cost, acc, network = model(x_crop, y_, None)

 with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
-    ## using local response normalization
     network, cost, acc, = model(x_train_batch, y_train_batch, False)
     _, cost_test, acc_test = model(x_test_batch, y_test_batch, True)
-    ## you may want to try batch normalization
-    # network, cost, acc, = model_batch_norm(x_train_batch, y_train_batch, None, is_train=True)
-    # _, cost_test, acc_test = model_batch_norm(x_test_batch, y_test_batch, True, is_train=False)

 ## train
 n_epoch = 50000
@@ -297,10 +239,8 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
         n_batch += 1

     if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-        print(
-            "Epoch %d : Step %d-%d of %d took %fs" %
-            (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
-        )
+        print("Epoch %d : Step %d-%d of %d took %fs" % \
+            (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
         print(" train loss: %f" % (train_loss / n_batch))
         print(" train acc: %f" % (train_acc / n_batch))

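Since this tutorial builds its input pipeline with tf.train.shuffle_batch / tf.train.batch, the batches above only start flowing once queue runners are running. A minimal sketch of how such a TF 1.x pipeline is typically driven (sess, cost and acc stand in for the tutorial's own objects; this is a sketch, not the tutorial's exact training loop):

    # Sketch only: assumes the queue-based graph above has already been built.
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    tl.layers.initialize_global_variables(sess)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start feeding x_train_batch / y_train_batch
    try:
        err, ac = sess.run([cost, acc])  # each run dequeues one fresh batch
        print("cost: %f acc: %f" % (err, ac))
    finally:
        coord.request_stop()
        coord.join(threads)
        sess.close()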
example/tutorial_binarynet_mnist_cnn.py

Lines changed: 0 additions & 2 deletions
@@ -2,9 +2,7 @@
 # -*- coding: utf-8 -*-

 import time
-
 import tensorflow as tf
-
 import tensorlayer as tl

 X_train, y_train, X_val, y_val, X_test, y_test = \

example/tutorial_cartpole_ac.py

Lines changed: 4 additions & 7 deletions
@@ -147,9 +147,8 @@ def learn(self, s, r, s_):
 sess = tf.Session()

 actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)
-critic = Critic(
-    sess, n_features=N_F, lr=LR_C
-) # we need a good teacher, so the teacher should learn faster than the actor
+# we need a good teacher, so the teacher should learn faster than the actor
+critic = Critic(sess, n_features=N_F, lr=LR_C)

 tl.layers.initialize_global_variables(sess)

@@ -193,10 +192,8 @@ def learn(self, s, r, s_):
         running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
         # start rendering if running_reward greater than a threshold
         # if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True
-        print(
-            "Episode: %d reward: %f running_reward %f took: %.5f" %
-            (i_episode, ep_rs_sum, running_reward, time.time() - episode_time)
-        )
+        print("Episode: %d reward: %f running_reward %f took: %.5f" % \
+            (i_episode, ep_rs_sum, running_reward, time.time() - episode_time))

         # Early Stopping for quick check
         if t >= MAX_EP_STEPS:
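For context, the actor and critic created above are wired together in the usual actor-critic loop. A minimal sketch of that interaction under the conventional pattern (the tutorial's exact method signatures may differ; env is assumed to be a Gym CartPole environment):

    # Sketch only: conventional actor-critic update, assuming Actor/Critic expose
    # choose_action / learn as in typical actor-critic tutorials.
    s = env.reset()
    for t in range(MAX_EP_STEPS):
        a = actor.choose_action(s)            # sample an action from the current policy
        s_, r, done, _ = env.step(a)
        td_error = critic.learn(s, r, s_)     # critic evaluates the transition (TD error)
        actor.learn(s, a, td_error)           # actor shifts toward actions the critic rates well
        s = s_
        if done:
            break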

example/tutorial_cifar10.py

Lines changed: 8 additions & 48 deletions
@@ -1,13 +1,9 @@
 #! /usr/bin/python
 # -*- coding: utf-8 -*-

-# tl.prepro for data augmentation
-
 import time
-
 import numpy as np
 import tensorflow as tf
-
 import tensorlayer as tl
 from tensorlayer.layers import *

@@ -23,36 +19,17 @@ def model(x, y_, reuse):
     with tf.variable_scope("model", reuse=reuse):
         net = InputLayer(x, name='input')
         net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
-        # net = Conv2dLayer(net, act=tf.nn.relu, shape=[5, 5, 3, 64],
-        #     strides=[1, 1, 1, 1], padding='SAME', # 64 features for each 5x5x3 patch
-        #     W_init=W_init, name ='cnn1') # output: (batch_size, 24, 24, 64)
         net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-        # net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
-        #     padding='SAME', pool = tf.nn.max_pool, name ='pool1',)# output: (batch_size, 12, 12, 64)
         net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
-        # net.outputs = tf.nn.lrn(net.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
-        #     beta=0.75, name='norm1')

         net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')
-        # net = Conv2dLayer(net, act=tf.nn.relu, shape=[5, 5, 64, 64],
-        #     strides=[1, 1, 1, 1], padding='SAME', # 64 features for each 5x5 patch
-        #     W_init=W_init, name ='cnn2') # output: (batch_size, 12, 12, 64)
         net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
-        # net.outputs = tf.nn.lrn(net.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
-        #     beta=0.75, name='norm2')
         net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
-        # net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
-        #     padding='SAME', pool = tf.nn.max_pool, name ='pool2') # output: (batch_size, 6, 6, 64)
-        net = FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
-        net = DenseLayer(
-            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
-        ) # output: (batch_size, 384)
-        net = DenseLayer(
-            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
-        ) # output: (batch_size, 192)
-        net = DenseLayer(
-            net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
-        ) # output: (batch_size, 10)
+
+        net = FlattenLayer(net, name='flatten')
+        net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')
+        net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')
+        net = DenseLayer(net, 10, act=tf.identity, W_init=W_init2, name='output')
         y = net.outputs

         ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -75,35 +52,18 @@ def model_batch_norm(x, y_, reuse, is_train):
     b_init2 = tf.constant_initializer(value=0.1)
     with tf.variable_scope("model", reuse=reuse):
         net = InputLayer(x, name='input')
-
         net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')
-        # net = Conv2dLayer(net, act=tf.identity, shape=[5, 5, 3, 64],
-        #     strides=[1, 1, 1, 1], padding='SAME', # 64 features for each 5x5x3 patch
-        #     W_init=W_init, b_init=None, name='cnn1') # output: (batch_size, 24, 24, 64)
         net = BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch1')
         net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-        # net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
-        #     padding='SAME', pool=tf.nn.max_pool, name='pool1',) # output: (batch_size, 12, 12, 64)

         net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')
-        # net = Conv2dLayer(net, act=tf.identity, shape=[5, 5, 64, 64],
-        #     strides=[1, 1, 1, 1], padding='SAME', # 64 features for each 5x5 patch
-        #     W_init=W_init, b_init=None, name ='cnn2') # output: (batch_size, 12, 12, 64)
         net = BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch2')
         net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
-        # net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
-        #     padding='SAME', pool = tf.nn.max_pool, name ='pool2') # output: (batch_size, 6, 6, 64)

         net = FlattenLayer(net, name='flatten') # output: (batch_size, 2304)
-        net = DenseLayer(
-            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
-        ) # output: (batch_size, 384)
-        net = DenseLayer(
-            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
-        ) # output: (batch_size, 192)
-        net = DenseLayer(
-            net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
-        ) # output: (batch_size, 10)
+        net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')
+        net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')
+        net = DenseLayer(net, 10, act=tf.identity, W_init=W_init2, name='output')
         y = net.outputs

         ce = tl.cost.cross_entropy(y, y_, name='cost')
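Both model builders in this file share one variable scope, so the train and test graphs are built by calling the same function twice with the reuse flag flipped. A minimal sketch of that pattern (the placeholder shapes and the returned triple are illustrative assumptions; the tutorial itself feeds queue-produced batches rather than placeholders):

    # Sketch only: train/test towers sharing weights via variable-scope reuse.
    x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')   # assumed cropped CIFAR-10 input
    y_ = tf.placeholder(tf.int32, shape=[None], name='y_')

    network, cost, acc = model(x, y_, reuse=False)      # first call creates the variables
    _, cost_test, acc_test = model(x, y_, reuse=True)   # second call reuses them for evaluation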
