Commit f4a0572

DEKHTIARJonathan authored and zsdonghao committed
Test documentation (#511)
* Activation cleaning docstring test
* Requirements pinned with ranges to ensure tested versions are used; ranges also avoid updating requirements all the time
* setup.cfg file added with PEP8 configuration
* activation.py refactored
* Docstring fixed - ready for documentation unittest
* YAPF correction for max_line_length: 120
* Test yapf refactored
* Test documentation added
* Missing requirement added: sphinx
* Allow the documentation test to pass on warnings
* Fix Travis dependency install
* Travis install script fixed
* Travis install command fixed
* Requirements conflict resolved
* YAPF style modified and merged into "setup.cfg"
* YAPF configuration updated
* Code refactored with the new YAPF formatting style
* Shorten code
* Various cleanups
* Trailing slashes removed
* Recurrent test fixed
* Line-width fix
* Docs requirements updated
* Fix example docs style
* Codacy issue fixed
* Merge errors fixed
* YAPF style applied
1 parent e557e62 · commit f4a0572

34 files changed: +244 additions, −261 deletions

.style.yapf

Lines changed: 0 additions & 60 deletions
This file was deleted.
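
Per the commit message, the YAPF style that lived in .style.yapf was merged into setup.cfg. The same style file can also be exercised programmatically; a minimal sketch, assuming yapf is installed and setup.cfg carries a [yapf] section:

```python
# Hedged sketch: format a snippet against the style now carried by setup.cfg.
# FormatCode returns the formatted source and whether anything changed.
from yapf.yapflib.yapf_api import FormatCode

source = "x = {  'a':37,'b':42}\n"
formatted, changed = FormatCode(source, style_config='setup.cfg')
print(formatted)  # x = {'a': 37, 'b': 42}
print(changed)    # True: the input needed reformatting
```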

.travis.yml

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ env:
 install:
   - pip install tensorflow
   - pip install -r requirements.txt
-  - pip install .[test]
+  - pip install -e .[dev,doc,test]
 
 
 script:
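
The new install line relies on extras groups being declared in the package metadata. A hedged sketch of what such a declaration might look like in setup.py — the group contents here are hypothetical, except sphinx, which matches docs/requirements.txt below:

```python
# Illustrative only: these pins are assumptions, not the repo's actual setup.py.
from setuptools import setup, find_packages

setup(
    name='tensorlayer',
    version='0.0.0',  # placeholder
    packages=find_packages(),
    extras_require={
        'dev': ['yapf>=0.21,<0.22'],    # hypothetical
        'doc': ['sphinx>=1.7,<1.8'],    # consistent with docs/requirements.txt
        'test': ['pytest>=3.5,<3.6'],   # hypothetical
    },
)
```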

docs/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -5,4 +5,4 @@ progressbar2>=3.37,<3.38
 scikit-image>=0.13,<0.14
 scipy>=1.0,<1.1
 sphinx>=1.7,<1.8
-tensorflow==1.5.0
+tensorflow>=1.7,<1.8
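
A small sketch of how a range pin such as tensorflow>=1.7,<1.8 behaves: tested versions satisfy it, while the next minor release is held back until the range is deliberately bumped.

```python
# Checking a version against a range specifier (illustrative).
from pkg_resources import Requirement

req = Requirement.parse('tensorflow>=1.7,<1.8')
print('1.7.0' in req)  # True  - within the tested range
print('1.8.0' in req)  # False - excluded until the pin is raised
```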

example/tutorial_binarynet_cifar10_tfrecord.py

Lines changed: 10 additions & 6 deletions
@@ -149,11 +149,13 @@ def read_and_decode(filename, is_train=None):
 x_train_, y_train_ = read_and_decode("train.cifar10", True)
 x_test_, y_test_ = read_and_decode("test.cifar10", False)
 # set the number of threads here
-x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \
-    batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)
+x_train_batch, y_train_batch = tf.train.shuffle_batch(
+    [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
+)
 # for testing, uses batch instead of shuffle_batch
-x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \
-    batch_size=batch_size, capacity=50000, num_threads=32)
+x_test_batch, y_test_batch = tf.train.batch(
+    [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
+)
 
 def model(x_crop, y_, reuse):
     """ For more simplified CNN APIs, check tensorlayer.org """
@@ -239,8 +241,10 @@ def model(x_crop, y_, reuse):
             n_batch += 1
 
         if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-            print("Epoch %d : Step %d-%d of %d took %fs" % \
-                (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
+            print(
+                "Epoch %d : Step %d-%d of %d took %fs" %
+                (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
+            )
             print(" train loss: %f" % (train_loss / n_batch))
             print(" train acc: %f" % (train_acc / n_batch))
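
For context, tf.train.shuffle_batch builds a background queue that worker threads keep filled; min_after_dequeue controls how much is buffered for shuffling. A self-contained sketch of the TF 1.x pattern, where fake_example is a hypothetical stand-in for read_and_decode:

```python
import tensorflow as tf

def fake_example():
    # Hypothetical single (image, label) pair; read_and_decode plays this role above.
    image = tf.random_uniform([24, 24, 3])
    label = tf.constant(1, dtype=tf.int32)
    return image, label

x_, y_ = fake_example()
x_batch, y_batch = tf.train.shuffle_batch(
    [x_, y_], batch_size=32, capacity=2000, min_after_dequeue=1000, num_threads=4
)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    images, labels = sess.run([x_batch, y_batch])  # one shuffled batch of 32
    coord.request_stop()
    coord.join(threads)
```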

example/tutorial_binarynet_mnist_cnn.py

Lines changed: 1 addition & 2 deletions
@@ -5,8 +5,7 @@
 import tensorflow as tf
 import tensorlayer as tl
 
-X_train, y_train, X_val, y_val, X_test, y_test = \
-    tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
 # X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)
 
 sess = tf.InteractiveSession()
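
A quick sanity check of the one-line loader; the 50k/10k/10k split sizes are TensorLayer defaults and assumed here:

```python
import tensorlayer as tl

X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
print(X_train.shape, y_train.shape)  # expected: (50000, 28, 28, 1) (50000,)
print(X_val.shape, X_test.shape)     # expected: (10000, 28, 28, 1) (10000, 28, 28, 1)
```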

example/tutorial_bipedalwalker_a3c_continuous_action.py

Lines changed: 4 additions & 6 deletions
@@ -198,11 +198,14 @@ def work(self):
         buffer_r.append(r)
 
         if total_step % UPDATE_GLOBAL_ITER == 0 or done:  # update global and assign to local net
+
             if done:
                 v_s_ = 0  # terminal
             else:
                 v_s_ = sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
+
             buffer_v_target = []
+
             for r in buffer_r[::-1]:  # reverse buffer r
                 v_s_ = r + GAMMA * v_s_
                 buffer_v_target.append(v_s_)
@@ -211,12 +214,7 @@ def work(self):
             buffer_s, buffer_a, buffer_v_target = (
                 np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
             )
-
-            feed_dict = {
-                self.AC.s: buffer_s,
-                self.AC.a_his: buffer_a,
-                self.AC.v_target: buffer_v_target,
-            }
+            feed_dict = {self.AC.s: buffer_s, self.AC.a_his: buffer_a, self.AC.v_target: buffer_v_target}
             # update gradients on global network
             self.AC.update_global(feed_dict)
             buffer_s, buffer_a, buffer_r = [], [], []
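
The reversed loop above builds n-step discounted value targets from the bootstrap estimate v_s_. A standalone sketch with a worked example (the tutorial restores chronological order when it reverses the buffer later, which is not shown in this hunk):

```python
GAMMA = 0.9

def discounted_targets(buffer_r, v_s_, gamma=GAMMA):
    targets = []
    for r in buffer_r[::-1]:     # newest reward first
        v_s_ = r + gamma * v_s_  # V_target(t) = r_t + gamma * V_target(t+1)
        targets.append(v_s_)
    targets.reverse()            # back to chronological order
    return targets

print(discounted_targets([1.0, 0.0, 2.0], v_s_=0.5))
# approximately [2.9845, 2.205, 2.45]
```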

example/tutorial_dorefanet_cifar10_tfrecord.py

Lines changed: 10 additions & 6 deletions
@@ -149,11 +149,13 @@ def read_and_decode(filename, is_train=None):
 x_train_, y_train_ = read_and_decode("train.cifar10", True)
 x_test_, y_test_ = read_and_decode("test.cifar10", False)
 # set the number of threads here
-x_train_batch, y_train_batch = tf.train.shuffle_batch([x_train_, y_train_], \
-    batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)
+x_train_batch, y_train_batch = tf.train.shuffle_batch(
+    [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
+)
 # for testing, uses batch instead of shuffle_batch
-x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], \
-    batch_size=batch_size, capacity=50000, num_threads=32)
+x_test_batch, y_test_batch = tf.train.batch(
+    [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
+)
 
 def model(x_crop, y_, reuse):
     """ For more simplified CNN APIs, check tensorlayer.org """
@@ -235,8 +237,10 @@ def model(x_crop, y_, reuse):
             n_batch += 1
 
         if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-            print("Epoch %d : Step %d-%d of %d took %fs" % \
-                (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
+            print(
+                "Epoch %d : Step %d-%d of %d took %fs" %
+                (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
+            )
             print(" train loss: %f" % (train_loss / n_batch))
             print(" train acc: %f" % (train_acc / n_batch))

example/tutorial_dorefanet_mnist_cnn.py

Lines changed: 1 addition & 2 deletions
@@ -5,8 +5,7 @@
 import tensorflow as tf
 import tensorlayer as tl
 
-X_train, y_train, X_val, y_val, X_test, y_test = \
-    tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
 # X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)
 
 sess = tf.InteractiveSession()

example/tutorial_generate_text.py

Lines changed: 30 additions & 12 deletions
@@ -230,10 +230,13 @@ def inference(x, is_train, sequence_length, reuse=None):
     rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)
     with tf.variable_scope("model", reuse=reuse):
         network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='embedding')
-        network = RNNLayer(network, cell_fn=tf.contrib.rnn.BasicLSTMCell, \
-            cell_init_args={'forget_bias': 0.0, 'state_is_tuple': True}, \
-            n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
-            return_seq_2d=True, name='lstm1')
+        network = RNNLayer(
+            network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={
+                'forget_bias': 0.0,
+                'state_is_tuple': True
+            }, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
+            return_seq_2d=True, name='lstm1'
+        )
         lstm1 = network
         network = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=tf.identity, name='output')
         return network, lstm1
@@ -297,14 +300,21 @@ def loss_fn(outputs, targets, batch_size, sequence_length):
     ## reset all states at the begining of every epoch
     state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
     for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):
-        _cost, state1, _ = sess.run([cost, lstm1.final_state, train_op], \
-            feed_dict={input_data: x, targets: y, lstm1.initial_state: state1})
+        _cost, state1, _ = sess.run(
+            [cost, lstm1.final_state, train_op], feed_dict={
+                input_data: x,
+                targets: y,
+                lstm1.initial_state: state1
+            }
+        )
         costs += _cost
         iters += sequence_length
 
         if step % (epoch_size // 10) == 1:
-            print("%.3f perplexity: %.3f speed: %.0f wps" % \
-                (step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time)))
+            print(
+                "%.3f perplexity: %.3f speed: %.0f wps" %
+                (step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))
+            )
     train_perplexity = np.exp(costs / iters)
     # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
     print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
@@ -319,14 +329,22 @@ def loss_fn(outputs, targets, batch_size, sequence_length):
     # feed the seed to initialize the state for generation.
     for ids in outs_id[:-1]:
         a_id = np.asarray(ids).reshape(1, 1)
-        state1 = sess.run([lstm1_test.final_state], \
-            feed_dict={input_data_test: a_id, lstm1_test.initial_state: state1})
+        state1 = sess.run(
+            [lstm1_test.final_state], feed_dict={
+                input_data_test: a_id,
+                lstm1_test.initial_state: state1
+            }
+        )
     # feed the last word in seed, and start to generate sentence.
     a_id = outs_id[-1]
     for _ in range(print_length):
         a_id = np.asarray(a_id).reshape(1, 1)
-        out, state1 = sess.run([y_soft, lstm1_test.final_state], \
-            feed_dict={input_data_test: a_id, lstm1_test.initial_state: state1})
+        out, state1 = sess.run(
+            [y_soft, lstm1_test.final_state], feed_dict={
+                input_data_test: a_id,
+                lstm1_test.initial_state: state1
+            }
+        )
     ## Without sampling
     # a_id = np.argmax(out[0])
     ## Sample from all words, if vocab_size is large,
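
On the bookkeeping in the training hunk: costs accumulates cross-entropy and iters the number of predicted steps, so np.exp(costs / iters) is exp(mean negative log-likelihood), i.e. per-step perplexity. A tiny sketch with hypothetical numbers:

```python
import numpy as np

costs, iters = 0.0, 0
for step_cost in [4.6, 4.1, 3.8]:  # hypothetical per-step cross-entropies
    costs += step_cost
    iters += 1                     # the tutorial adds sequence_length instead
print("perplexity: %.3f" % np.exp(costs / iters))  # ~64.5
```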

example/tutorial_imagenet_inceptionV3_distributed.py

Lines changed: 13 additions & 19 deletions
@@ -25,8 +25,7 @@
 from tensorflow.python.framework.errors_impl import OutOfRangeError
 from tensorflow.python.training import session_run_hook
 from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
-from tensorflow.python.training.monitored_session import \
-    SingularMonitoredSession
+from tensorflow.python.training.monitored_session import SingularMonitoredSession
 
 import tensorlayer as tl
 
@@ -294,18 +293,15 @@ def calculate_metrics(predicted_batch, real_batch, threshold=0.5, is_training=Fa
 def run_evaluator(task_spec, checkpoints_path, batch_size=32):
     with tf.Graph().as_default():
         # load dataset
-        images_input, one_hot_classes, num_classes, _dataset_size = \
-            load_data(file=VAL_FILE,
-                      task_spec=task_spec,
-                      batch_size=batch_size,
-                      epochs=1)
+        images_input, one_hot_classes, num_classes, _dataset_size = load_data(
+            file=VAL_FILE, task_spec=task_spec, batch_size=batch_size, epochs=1
+        )
         _network, predictions = build_network(images_input, num_classes=num_classes, is_training=False)
         saver = tf.train.Saver()
         # metrics
-        metrics_init_ops, _, metrics_ops = \
-            calculate_metrics(predicted_batch=predictions,
-                              real_batch=one_hot_classes,
-                              is_training=False)
+        metrics_init_ops, _, metrics_ops = calculate_metrics(
+            predicted_batch=predictions, real_batch=one_hot_classes, is_training=False
+        )
         # tensorboard summary
         summary_op = tf.summary.merge_all()
         # session hook
@@ -338,12 +334,9 @@ def run_worker(task_spec, checkpoints_path, batch_size=32, epochs=10):
     global_step = tf.train.get_or_create_global_step()
     with tf.device(device_fn):
         # load dataset
-        images_input, one_hot_classes, num_classes, dataset_size = \
-            load_data(file=TRAIN_FILE,
-                      task_spec=task_spec,
-                      batch_size=batch_size,
-                      epochs=epochs,
-                      shuffle_size=10000)
+        images_input, one_hot_classes, num_classes, dataset_size = load_data(
+            file=TRAIN_FILE, task_spec=task_spec, batch_size=batch_size, epochs=epochs, shuffle_size=10000
+        )
         # network
         network, predictions = build_network(images_input, num_classes=num_classes, is_training=True)
         # training operations
@@ -390,8 +383,9 @@ def run_worker(task_spec, checkpoints_path, batch_size=32, epochs=10):
         last_log_time = time.time()
         next_log_time = last_log_time + 60
         while not sess.should_stop():
-            step, loss_val, learning_rate_val, _, metrics = \
-                sess.run([global_step, loss, learning_rate, train_op, metrics_ops])
+            step, loss_val, learning_rate_val, _, metrics = sess.run(
+                [global_step, loss, learning_rate, train_op, metrics_ops]
+            )
             if task_spec is None or task_spec.is_master():
                 now = time.time()
                 if now > next_log_time:
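
The import change above keeps SingularMonitoredSession, whose should_stop() drives the worker loop. A minimal TF 1.x sketch, with a trivial placeholder standing in for the real training op:

```python
import tensorflow as tf
from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
from tensorflow.python.training.monitored_session import SingularMonitoredSession

global_step = tf.train.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)  # placeholder for the optimizer step

with SingularMonitoredSession(hooks=[StopAtStepHook(last_step=100)]) as sess:
    while not sess.should_stop():
        step = sess.run(train_op)  # loop ends once global_step reaches 100
```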
