diff --git a/CHANGELOG.md b/CHANGELOG.md
index c2499538d..ddb5238d2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -161,6 +161,7 @@ To release a new version, please update the changelog as followed:
 - Update pytest from 3.5.1 to 3.6.0 (by @DEKHTIARJonathan and @pyup-bot in #647)
 - Update progressbar2 from 3.37.1 to 3.38.0 (by @DEKHTIARJonathan and @pyup-bot in #651)
 - Update scikit-image from 0.13.1 to 0.14.0 (by @DEKHTIARJonathan and @pyup-bot in #656)
+- Update keras from 2.1.6 to 2.2.0 (by @DEKHTIARJonathan and @pyup-bot in #684)
 
 ### Contributors
 @lgarithm @DEKHTIARJonathan @2wins @One-sixth @zsdonghao @luomai
@@ -178,6 +179,7 @@ To release a new version, please update the changelog as followed:
 - CircleCI added to build and upload Docker Containers for each PR merged and tag release (by @DEKHTIARJonathan in #648)
 - Decorator:
   - `tl.decorators` API created including `deprecated_alias` and `private_method` (by @DEKHTIARJonathan in #660)
+  - `tl.decorators` API enriched with `protected_method` (by @DEKHTIARJonathan in #675)
 - Docker:
   - Containers for each release and for each PR merged on master built (by @DEKHTIARJonathan in #648)
   - Containers built in the following configurations (by @DEKHTIARJonathan in #648):
@@ -186,11 +188,13 @@
     - py3 + cpu
     - py3 + gpu
 - Documentation:
+  - README cleaned (by @luomai in #677)
   - Release semantic version added on index page (by @DEKHTIARJonathan in #633)
   - Optimizers page added (by @DEKHTIARJonathan in #636)
   - `AMSGrad` added on Optimizers page added (by @DEKHTIARJonathan in #636)
 - Layer:
   - ElementwiseLambdaLayer added to use custom function to connect multiple layer inputs (by @One-sixth in #579)
+  - AtrousDeConv2dLayer added (by @2wins in #662)
 - Optimizer:
   - AMSGrad Optimizer added based on `On the Convergence of Adam and Beyond (ICLR 2018)` (by @DEKHTIARJonathan in #636)
 - Setup:
@@ -223,7 +227,10 @@
 - All the tests are now using a DEBUG level verbosity when run individualy (by @DEKHTIARJonathan in #660)
 - `tf.identity` as activation is **ignored**, thus reducing the size of the graph by removing useless operation (by @DEKHTIARJonathan in #667)
 - argument dictionaries are now checked and saved within the `Layer` Base Class (by @DEKHTIARJonathan in #667)
-- `unstack_layer` function transformed into Class `UnStackLayer` (by @DEKHTIARJonathan in #683)
+- `Layer` Base Class now provides methods to reliably update `all_layers`, `all_params`, and `all_drop` (by @DEKHTIARJonathan in #675)
+- Input Layers have been moved from `tl.layers.core` to `tl.layers.inputs` (by @DEKHTIARJonathan in #675)
+- Input Layers are now considered true layers in the graph (they represent a placeholder); unittests have been updated (by @DEKHTIARJonathan in #675)
+- Layer API is simplified: `prev_layer` is now automatically fed into `self.inputs` (by @DEKHTIARJonathan in #675)
 
 ### Deprecated
 - `tl.layers.TimeDistributedLayer` argurment `args` is deprecated in favor of `layer_args` (by @DEKHTIARJonathan in #667)
@@ -231,25 +238,30 @@
 ### Removed
 - `assert()` calls remove and replaced by `raise AssertionError()` (by @DEKHTIARJonathan in #667)
 - `tl.identity` is removed, not used anymore and deprecated for a long time (by @DEKHTIARJonathan in #667)
+- All code specific to `TF.__version__ < "1.6"` has been removed (by @DEKHTIARJonathan in #675)
 
 ### Fixed
 - Issue #498 - Deprecation Warning Fix in `tl.layers.RNNLayer` with `inspect` (by @DEKHTIARJonathan in #574)
 - Issue #498 - Deprecation Warning Fix in `tl.files` with truth value of an empty array is ambiguous (by @DEKHTIARJonathan in #575)
 - Issue #565 related to `tl.utils.predict` fixed - `np.hstack` problem in which the results for multiple batches are stacked along `axis=1` (by @2wins in #566)
 - Issue #572 with `tl.layers.DeformableConv2d` fixed (by @DEKHTIARJonathan in #573)
+- Issue #664 with `tl.layers.ConvLSTMLayer` fixed (by @dengyueyun666 in #676)
 - Typo of the document of ElementwiseLambdaLayer (by @zsdonghao in #588)
 - Error in `tl.layers.TernaryConv2d` fixed - self.inputs not defined (by @DEKHTIARJonathan in #658)
 - Deprecation warning fixed in `tl.layers.binary._compute_threshold()` (by @DEKHTIARJonathan in #658)
 - All references to `tf.logging` replaced by `tl.logging` (by @DEKHTIARJonathan in #661)
 - Duplicated code removed when bias was used (by @DEKHTIARJonathan in #667)
+- `tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops` is now lazily loaded to prevent a systematic error from being raised (by @DEKHTIARJonathan in #675)
 - Tutorial:
   - `tutorial_word2vec_basic.py` saving issue #476 fixed (by @DEKHTIARJonathan in #635)
   - All tutorials tested and errors have been fixed (by @DEKHTIARJonathan in #635)
 
+### Security
 ### Dependencies Update
 - Update pytest from 3.5.1 to 3.6.0 (by @DEKHTIARJonathan and @pyup-bot in #647)
 - Update progressbar2 from 3.37.1 to 3.38.0 (by @DEKHTIARJonathan and @pyup-bot in #651)
 - Update scikit-image from 0.13.1 to 0.14.0 (by @DEKHTIARJonathan and @pyup-bot in #656)
+- Update keras from 2.1.6 to 2.2.0 (by @DEKHTIARJonathan and @pyup-bot in #684)
 
 ### Contributors
 @lgarithm @DEKHTIARJonathan @2wins @One-sixth @zsdonghao @luomai
@@ -294,5 +306,5 @@ To release a new version, please update the changelog as followed:
 @zsdonghao @luomai @DEKHTIARJonathan
 
 [Unreleased]: https://github.com/tensorlayer/tensorlayer/compare/1.8.5...master
-[1.8.6]: https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc3...1.8.5
+[1.8.6]: https://github.com/tensorlayer/tensorlayer/compare/1.8.5...1.8.6rc4
 [1.8.5]: https://github.com/tensorlayer/tensorlayer/compare/1.8.4...1.8.5
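For context on the `#675` API entries above: the new `Layer` contract means subclasses receive `self.inputs` from the base class instead of wiring it up manually. A minimal sketch of the resulting pattern (illustrative names only, not the actual TensorLayer source):

```python
class Layer(object):
    """Sketch of the simplified base class described in the changelog."""

    def __init__(self, prev_layer=None, name=None):
        self.name = name
        if prev_layer is not None:
            # `prev_layer` is automatically fed into `self.inputs`
            self.inputs = prev_layer.outputs


class MyCustomLayer(Layer):

    def __init__(self, prev_layer, name='my_custom'):
        super(MyCustomLayer, self).__init__(prev_layer=prev_layer, name=name)
        # `self.inputs` is already populated here; a subclass only
        # needs to compute and assign `self.outputs`.
        self.outputs = self.inputs
```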
diff --git a/README.md b/README.md
index 9d11aef0a..98542d164 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
 
 ![PyPI Stable Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer.svg?label=PyPI%20-%20Release)
 ![PyPI RC Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/release/tensorlayer/tensorlayer/all.svg?label=PyPI%20-%20Pre-Release)
-[![Github commits (since latest release)](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg)](https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc3...master)
+[![Github commits (since latest release)](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg)](https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc4...master)
 [![PyPI - Python Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/pypi/pyversions/tensorlayer.svg)](https://pypi.org/project/tensorlayer/)
 [![Supported TF Version](http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/badge/tensorflow-1.6.0+-blue.svg)](https://github.com/tensorflow/tensorflow/releases)
diff --git a/README.rst b/README.rst
index dc389b6ae..bd0179789 100644
--- a/README.rst
+++ b/README.rst
@@ -40,7 +40,7 @@
     :target: https://pypi.org/project/tensorlayer/
 
 .. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/github/commits-since/tensorlayer/tensorlayer/latest.svg
-    :target: https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc3...master
+    :target: https://github.com/tensorlayer/tensorlayer/compare/1.8.6rc4...master
 
 .. image:: http://ec2-35-178-47-120.eu-west-2.compute.amazonaws.com/pypi/pyversions/tensorlayer.svg
     :target: https://pypi.org/project/tensorlayer/
diff --git a/example/tutorial_frozenlake_dqn.py b/example/tutorial_frozenlake_dqn.py
index a487af9d2..9229f6c77 100644
--- a/example/tutorial_frozenlake_dqn.py
+++ b/example/tutorial_frozenlake_dqn.py
@@ -103,5 +103,5 @@ def to_one_hot(i, n_classes=None):
 
         ## Note that, the rewards here with random action
         running_reward = rAll if running_reward is None else running_reward * 0.99 + rAll * 0.01
-        print("Episode [%d/%d] sum reward:%f running reward:%f took:%.5fs %s" % \
+        print("Episode [%d/%d] sum reward: %f running reward: %f took: %.5fs %s" % \
              (i, num_episodes, rAll, running_reward, time.time() - episode_time, '' if rAll == 0 else ' !!!!!!!!'))
diff --git a/example/tutorial_frozenlake_q_table.py b/example/tutorial_frozenlake_q_table.py
index e2b880728..9380b8735 100644
--- a/example/tutorial_frozenlake_q_table.py
+++ b/example/tutorial_frozenlake_q_table.py
@@ -50,7 +50,7 @@
             break
     rList.append(rAll)
     running_reward = r if running_reward is None else running_reward * 0.99 + r * 0.01
-    print("Episode [%d/%d] sum reward:%f running reward:%f took:%.5fs %s" % \
+    print("Episode [%d/%d] sum reward: %f running reward: %f took: %.5fs %s" % \
          (i, num_episodes, rAll, running_reward, time.time() - episode_time, '' if rAll == 0 else ' !!!!!!!!'))
 print("Final Q-Table Values:/n %s" % Q)
 
diff --git a/example/tutorial_word2vec_basic.py b/example/tutorial_word2vec_basic.py
index b89428d6a..ec3183e00 100644
--- a/example/tutorial_word2vec_basic.py
+++ b/example/tutorial_word2vec_basic.py
@@ -250,7 +250,7 @@ def main_word2vec_basic():
         if step % print_freq == 0:
             if step > 0:
                 average_loss /= print_freq
-            print("Average loss at step %d/%d. loss:%f took:%fs" % \
+            print("Average loss at step %d/%d. loss: %f took: %fs" % \
                  (step, num_steps, average_loss, time.time() - start_time))
             average_loss = 0
             # Prints out nearby words given a list of words.
diff --git a/requirements/requirements_test.txt b/requirements/requirements_test.txt
index 9672c5c07..220ef5859 100644
--- a/requirements/requirements_test.txt
+++ b/requirements/requirements_test.txt
@@ -1,4 +1,4 @@
-keras>=2.1,<2.2
+keras>=2.2,<2.3
 pycodestyle>=2.4,<2.5
 pydocstyle>=2.1,<2.2
 pytest>=3.6,<3.7
diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py
index da434b834..133bcea40 100644
--- a/tensorlayer/__init__.py
+++ b/tensorlayer/__init__.py
@@ -56,7 +56,7 @@
 global_dict = {}
 
 # Use the following formating: (major, minor, patch, prerelease)
-VERSION = (1, 8, 6, "rc3")
+VERSION = (1, 8, 6, "rc4")
 __shortversion__ = '.'.join(map(str, VERSION[:3]))
 __version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
 
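The version bump above can be sanity-checked by replaying the two joins from `tensorlayer/__init__.py`:

```python
# Reproduces the version-string construction shown in the hunk above.
VERSION = (1, 8, 6, "rc4")

__shortversion__ = '.'.join(map(str, VERSION[:3]))                    # "1.8.6"
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])  # "1.8.6rc4"

assert __shortversion__ == "1.8.6"
assert __version__ == "1.8.6rc4"
```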
diff --git a/tensorlayer/files/utils.py b/tensorlayer/files/utils.py
index 5250768a2..4e5dc96aa 100644
--- a/tensorlayer/files/utils.py
+++ b/tensorlayer/files/utils.py
@@ -396,7 +396,7 @@ def load_cropped_svhn(path='data', include_extra=True):
         logging.info("  added n_extra {} to n_train {} took {}s".format(len(y_extra), len(y_train), time.time() - t))
     else:
         logging.info("  no extra images are included")
-    logging.info("  image size:%s n_train:%d n_test:%d" % (str(X_train.shape[1:4]), len(y_train), len(y_test)))
+    logging.info("  image size: %s n_train: %d n_test: %d" % (str(X_train.shape[1:4]), len(y_train), len(y_test)))
     logging.info("  took: {}s".format(int(time.time() - start_time)))
     return X_train, y_train, X_test, y_test
 
diff --git a/tensorlayer/layers/binary.py b/tensorlayer/layers/binary.py
index 128fc8428..a61c7f15d 100644
--- a/tensorlayer/layers/binary.py
+++ b/tensorlayer/layers/binary.py
@@ -150,7 +150,7 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
         logging.info(
             "BinaryDenseLayer %s: %d %s" %
-            (self.name, n_units, self.act.__name__ if self.act is not None else '- No Activation')
+            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         if self.inputs.get_shape().ndims != 2:
@@ -279,9 +279,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "BinaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (
+            "BinaryConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
                 self.name, n_filter, str(filter_size), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -378,7 +378,7 @@ def __init__(
 
         logging.info(
             "TernaryDenseLayer %s: %d %s" %
-            (self.name, n_units, self.act.__name__ if self.act is not None else '- No Activation')
+            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         if self.inputs.get_shape().ndims != 2:
@@ -510,9 +510,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "TernaryConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (
+            "TernaryConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
                 self.name, n_filter, str(filter_size), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -619,7 +619,7 @@ def __init__(
 
         logging.info(
             "DorefaDenseLayer %s: %d %s" %
-            (self.name, n_units, self.act.__name__ if self.act is not None else '- No Activation')
+            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         if self.inputs.get_shape().ndims != 2:
@@ -756,9 +756,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "DorefaConv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (
+            "DorefaConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
                 self.name, n_filter, str(filter_size), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
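The `'- No Activation'` to `'No Activation'` substitution above recurs verbatim in every layer file below; a tiny helper could express it once. This is only a sketch, no such helper exists in this diff:

```python
def _act_name(act):
    """Printable name for an optional activation function (hypothetical helper)."""
    return act.__name__ if act is not None else 'No Activation'

# Usage: logging.info("BinaryConv2d %s: act: %s" % (self.name, _act_name(self.act)))
```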
diff --git a/tensorlayer/layers/convolution.py b/tensorlayer/layers/convolution.py
index d6904d5f3..5a209a051 100644
--- a/tensorlayer/layers/convolution.py
+++ b/tensorlayer/layers/convolution.py
@@ -89,9 +89,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (
+            "Conv1dLayer %s: shape: %s stride: %s pad: %s act: %s" % (
                 self.name, str(shape), str(stride), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -211,9 +211,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (
+            "Conv2dLayer %s: shape: %s strides: %s pad: %s act: %s" % (
                 self.name, str(shape), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -341,9 +341,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (
+            "DeConv2dLayer %s: shape: %s out_shape: %s strides: %s pad: %s act: %s" % (
                 self.name, str(shape), str(output_shape), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -428,9 +428,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (
+            "Conv3dLayer %s: shape: %s strides: %s pad: %s act: %s" % (
                 self.name, str(shape), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -510,9 +510,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (
+            "DeConv3dLayer %s: shape: %s out_shape: %s strides: %s pad: %s act: %s" % (
                 self.name, str(shape), str(output_shape), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -581,7 +581,7 @@ def __init__(
         super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" %
+            "UpSampling2dLayer %s: is_scale: %s size: %s method: %d align_corners: %s" %
             (self.name, is_scale, size, method, align_corners)
         )
@@ -653,7 +653,7 @@ def __init__(
         super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" %
+            "DownSampling2dLayer %s: is_scale: %s size: %s method: %d, align_corners: %s" %
             (self.name, is_scale, size, method, align_corners)
         )
@@ -756,8 +756,8 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "DeformableConv2d %s: n_filter: %d, filter_size: %s act:%s" %
-            (self.name, n_filter, str(filter_size), self.act.__name__ if self.act is not None else '- No Activation')
+            "DeformableConv2d %s: n_filter: %d, filter_size: %s act: %s" %
+            (self.name, n_filter, str(filter_size), self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         self.offset_layer = offset_layer
@@ -1081,9 +1081,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (
+            "AtrousConv2dLayer %s: n_filter: %d filter_size: %s rate: %d pad: %s act: %s" % (
                 self.name, n_filter, filter_size, rate, padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -1157,9 +1157,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "AtrousDeConv2dLayer %s: shape:%s output_shape:%s rate:%d pad:%s act:%s" % (
+            "AtrousDeConv2dLayer %s: shape: %s output_shape: %s rate: %d pad: %s act: %s" % (
                 self.name, shape, output_shape, rate, padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -1230,6 +1230,7 @@ def deconv2d_bilinear_upsampling_initializer(shape):
     """
     if shape[0] != shape[1]:
         raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
+
     if shape[3] < shape[2]:
         raise Exception(
             'deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels '
@@ -1320,9 +1321,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "Conv1d %s: n_filter:%d filter_size:%s stride:%d pad:%s act:%s dilation_rate:%d" % (
+            "Conv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s dilation_rate: %d" % (
                 self.name, n_filter, filter_size, stride, padding, self.act.__name__
-                if self.act is not None else '- No Activation', dilation_rate
+                if self.act is not None else 'No Activation', dilation_rate
             )
         )
@@ -1412,40 +1413,24 @@ def __init__(
         data_format=None,
         name='conv2d',
     ):
-        # if W_init_args is None:
-        #     W_init_args = {}
-        # if b_init_args is None:
-        #     b_init_args = {}
-        #
+
         # if len(strides) != 2:
         #     raise ValueError("len(strides) should be 2, Conv2d and Conv2dLayer are different.")
-        #
+
         # try:
         #     pre_channel = int(layer.outputs.get_shape()[-1])
+
         #     except Exception:  # if pre_channel is ?, it happens when using Spatial Transformer Net
         #     pre_channel = 1
         #     logging.info("[warnings] unknow input channels, set to 1")
-        # return Conv2dLayer(
-        #     layer,
-        #     act=act,
-        #     shape=(filter_size[0], filter_size[1], pre_channel, n_filter),  # 32 features for each 5x5 patch
-        #     strides=(1, strides[0], strides[1], 1),
-        #     padding=padding,
-        #     W_init=W_init,
-        #     W_init_args=W_init_args,
-        #     b_init=b_init,
-        #     b_init_args=b_init_args,
-        #     use_cudnn_on_gpu=use_cudnn_on_gpu,
-        #     data_format=data_format,
-        #     name=name)
         super(Conv2d, self
              ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "Conv2d %s: n_filter:%d filter_size:%s strides:%s pad:%s act:%s" % (
+            "Conv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
                 self.name, n_filter, str(filter_size), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
         # with tf.variable_scope(name) as vs:
@@ -1535,9 +1520,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (
+            "DeConv2d %s: n_filters: %s strides: %s pad: %s act: %s" % (
                 self.name, str(n_filter), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -1606,9 +1591,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (
+            "DeConv3d %s: n_filters: %s strides: %s pad: %s act: %s" % (
                 self.name, str(n_filter), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -1710,9 +1695,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (
+            "DepthwiseConv2d %s: shape: %s strides: %s pad: %s act: %s" % (
                 self.name, str(shape), str(strides), padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
@@ -1824,9 +1809,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "SeparableConv1d %s: n_filter:%d filter_size:%s filter_size:%s depth_multiplier:%d act:%s" % (
+            "SeparableConv1d %s: n_filter: %d filter_size: %s strides: %s depth_multiplier: %d act: %s" % (
                 self.name, n_filter, str(filter_size), str(strides), depth_multiplier, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
         with tf.variable_scope(name) as vs:
@@ -1934,9 +1919,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "SeparableConv2d %s: n_filter:%d filter_size:%s filter_size:%s depth_multiplier:%d act:%s" % (
+            "SeparableConv2d %s: n_filter: %d filter_size: %s strides: %s depth_multiplier: %d act: %s" % (
                 self.name, n_filter, str(filter_size), str(strides), depth_multiplier, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
            )
         )
@@ -2024,9 +2009,9 @@ def __init__(
         ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
 
         logging.info(
-            "GroupConv2d %s: n_filter:%d size:%s strides:%s n_group:%d pad:%s act:%s" % (
+            "GroupConv2d %s: n_filter: %d size: %s strides: %s n_group: %d pad: %s act: %s" % (
                 self.name, n_filter, str(filter_size), str(strides), n_group, padding, self.act.__name__
-                if self.act is not None else '- No Activation'
+                if self.act is not None else 'No Activation'
             )
         )
diff --git a/tensorlayer/layers/core.py b/tensorlayer/layers/core.py
index 6fa79c2bb..b944f0cab 100644
--- a/tensorlayer/layers/core.py
+++ b/tensorlayer/layers/core.py
@@ -654,7 +654,7 @@ def __init__(
 
         logging.info(
             "DenseLayer %s: %d %s" %
-            (self.name, n_units, self.act.__name__ if self.act is not None else '- No Activation')
+            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         self.n_units = n_units
@@ -961,7 +961,7 @@ def __init__(
     ):
         super(DropoutLayer, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("DropoutLayer %s: keep:%f is_fix:%s" % (self.name, keep, is_fix))
+        logging.info("DropoutLayer %s: keep: %f is_fix: %s" % (self.name, keep, is_fix))
 
         if is_train is False:
             logging.info("  skip DropoutLayer")
@@ -1029,7 +1029,7 @@ def __init__(
             self.outputs = prev_layer.outputs
 
         else:
-            logging.info("GaussianNoiseLayer %s: mean:%f stddev:%f" % (self.name, mean, stddev))
+            logging.info("GaussianNoiseLayer %s: mean: %f stddev: %f" % (self.name, mean, stddev))
             with tf.variable_scope(name):
                 # noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape())
                 noise = tf.random_normal(shape=self.inputs.get_shape(), mean=mean, stddev=stddev, seed=seed)
@@ -1100,7 +1100,7 @@ def __init__(
 
         logging.info(
             "DropconnectDenseLayer %s: %d %s" %
-            (self.name, n_units, self.act.__name__ if self.act is not None else '- No Activation')
+            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         if self.inputs.get_shape().ndims != 2:
diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py
index 22e64fe97..dada28d0b 100644
--- a/tensorlayer/layers/extend.py
+++ b/tensorlayer/layers/extend.py
@@ -46,7 +46,7 @@ def __init__(
     ):
         super(ExpandDimsLayer, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("ExpandDimsLayer %s: axis:%d" % (self.name, axis))
+        logging.info("ExpandDimsLayer %s: axis: %d" % (self.name, axis))
 
         with tf.variable_scope(name):
             self.outputs = tf.expand_dims(self.inputs, axis=axis)
@@ -84,7 +84,7 @@ def __init__(self, prev_layer, multiples=None, name='tile'):
 
         super(TileLayer, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("TileLayer %s: multiples:%s" % (self.name, multiples))
+        logging.info("TileLayer %s: multiples: %s" % (self.name, multiples))
 
         with tf.variable_scope(name):
             self.outputs = tf.tile(self.inputs, multiples=multiples)
diff --git a/tensorlayer/layers/flow_control.py b/tensorlayer/layers/flow_control.py
index f21f3c7ea..023943b89 100644
--- a/tensorlayer/layers/flow_control.py
+++ b/tensorlayer/layers/flow_control.py
@@ -62,7 +62,7 @@ def __init__(self, layers, name='mux_layer'):
 
         all_inputs = tf.stack(self.inputs, name=name)  # pack means concat a list of tensor in a new dim # 1.2
 
-        logging.info("MultiplexerLayer %s: n_inputs:%d" % (self.name, self.n_inputs))
+        logging.info("MultiplexerLayer %s: n_inputs: %d" % (self.name, self.n_inputs))
 
         self.sel = tf.placeholder(tf.int32)
         self.outputs = tf.gather(all_inputs, self.sel, name=name)  # [sel, :, : ...] # 1.2
diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py
index 82a1f47c6..c08f2c54a 100644
--- a/tensorlayer/layers/merge.py
+++ b/tensorlayer/layers/merge.py
@@ -111,7 +111,7 @@ def __init__(
         super(ElementwiseLayer, self).__init__(prev_layer=layers, name=name)
 
         logging.info(
-            "ElementwiseLayer %s: size:%s fn:%s" % (self.name, layers[0].outputs.get_shape(), combine_fn.__name__)
+            "ElementwiseLayer %s: size: %s fn: %s" % (self.name, layers[0].outputs.get_shape(), combine_fn.__name__)
         )
 
         self.outputs = layers[0].outputs
@@ -170,7 +170,7 @@ def __init__(
         name='elementwiselambda_layer',
     ):
-        super(ElementwiseLambdaLayer, self).__init__(prev_layer=layers, fn_args=fn_args, name=name)
+        super(ElementwiseLambdaLayer, self).__init__(prev_layer=layers, act=act, fn_args=fn_args, name=name)
 
         logging.info("ElementwiseLambdaLayer %s" % self.name)
 
         with tf.variable_scope(name) as vs:
diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py
index cca94e7fe..da54114fd 100644
--- a/tensorlayer/layers/normalization.py
+++ b/tensorlayer/layers/normalization.py
@@ -113,11 +113,11 @@ def __init__(
         gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002),
         name='batchnorm_layer',
     ):
-        super(BatchNormLayer, self).__init__(prev_layer=prev_layer, name=name)
+        super(BatchNormLayer, self).__init__(prev_layer=prev_layer, act=act, name=name)
 
         logging.info(
-            "BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" %
-            (self.name, decay, epsilon, self.act.__name__ if self.act is not None else '- No Activation', is_train)
+            "BatchNormLayer %s: decay: %f epsilon: %f act: %s is_train: %s" %
+            (self.name, decay, epsilon, self.act.__name__ if self.act is not None else 'No Activation', is_train)
         )
 
         x_shape = self.inputs.get_shape()
@@ -227,8 +227,8 @@ def __init__(
         super(InstanceNormLayer, self).__init__(prev_layer=prev_layer, act=act, name=name)
 
         logging.info(
-            "InstanceNormLayer %s: epsilon:%f act:%s" %
-            (self.name, epsilon, self.act.__name__ if self.act is not None else '- No Activation')
+            "InstanceNormLayer %s: epsilon: %f act: %s" %
+            (self.name, epsilon, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         with tf.variable_scope(name) as vs:
@@ -277,7 +277,7 @@ def __init__(
         super(LayerNormLayer, self).__init__(prev_layer=prev_layer, act=act, name=name)
 
         logging.info(
-            "LayerNormLayer %s: act:%s" % (self.name, self.act.__name__ if self.act is not None else '- No Activation')
+            "LayerNormLayer %s: act: %s" % (self.name, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         with tf.variable_scope(name) as vs:
diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py
index 77c958896..7cee0e2e2 100644
--- a/tensorlayer/layers/padding.py
+++ b/tensorlayer/layers/padding.py
@@ -49,7 +49,7 @@ def __init__(
    ):
         super(PadLayer, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("PadLayer %s: padding:%s mode:%s" % (self.name, list(padding), mode))
+        logging.info("PadLayer %s: padding: %s mode: %s" % (self.name, list(padding), mode))
 
         if padding is None:
             raise Exception(
@@ -85,7 +85,7 @@ def __init__(
    ):
         super(ZeroPad1d, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("ZeroPad1d %s: padding:%s" % (self.name, str(padding)))
+        logging.info("ZeroPad1d %s: padding: %s" % (self.name, str(padding)))
 
         if not isinstance(padding, (int, tuple, dict)):
             raise AssertionError()
@@ -120,7 +120,7 @@ def __init__(
    ):
         super(ZeroPad2d, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("ZeroPad2d %s: padding:%s" % (self.name, str(padding)))
+        logging.info("ZeroPad2d %s: padding: %s" % (self.name, str(padding)))
 
         if not isinstance(padding, (int, tuple)):
             raise AssertionError("Padding should be of type `int` or `tuple`")
@@ -154,7 +154,7 @@ def __init__(
    ):
         super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("ZeroPad3d %s: padding:%s" % (self.name, str(padding)))
+        logging.info("ZeroPad3d %s: padding: %s" % (self.name, str(padding)))
 
         if not isinstance(padding, (int, tuple)):
             raise AssertionError()
diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py
index 183337815..0b3f685c5 100644
--- a/tensorlayer/layers/pooling.py
+++ b/tensorlayer/layers/pooling.py
@@ -71,7 +71,7 @@ def __init__(
         super(PoolLayer, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" %
+            "PoolLayer %s: ksize: %s strides: %s padding: %s pool: %s" %
             (self.name, str(ksize), str(strides), padding, pool.__name__)
         )
@@ -110,7 +110,7 @@ def __init__(
         super(MaxPool1d, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "MaxPool1d %s: filter_size:%s strides:%s padding:%s" %
+            "MaxPool1d %s: filter_size: %s strides: %s padding: %s" %
             (self.name, str(filter_size), str(strides), str(padding))
         )
@@ -143,7 +143,7 @@ class MeanPool1d(Layer):
         A unique layer name.
 
     """
-    # logging.info("MeanPool1d %s: filter_size:%s strides:%s padding:%s" % (self.name, str(filter_size), str(strides), str(padding)))
+    # logging.info("MeanPool1d %s: filter_size: %s strides: %s padding: %s" % (self.name, str(filter_size), str(strides), str(padding)))
     # outputs = tf.layers.average_pooling1d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name)
     #
     # net_new = copy.copy(prev_layer)
@@ -157,7 +157,7 @@ def __init__(
         super(MeanPool1d, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "MeanPool1d %s: filter_size:%s strides:%s padding:%s" %
+            "MeanPool1d %s: filter_size: %s strides: %s padding: %s" %
             (self.name, str(filter_size), str(strides), str(padding))
         )
@@ -194,7 +194,7 @@ def __init__(self, prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME
         super(MaxPool2d, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "MaxPool2d %s: filter_size:%s strides:%s padding:%s" %
+            "MaxPool2d %s: filter_size: %s strides: %s padding: %s" %
             (self.name, str(filter_size), str(strides), str(padding))
         )
@@ -232,7 +232,7 @@ def __init__(self, prev_layer, filter_size=(3, 3), strides=(2, 2), padding='SAME
         super(MeanPool2d, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "MeanPool2d %s: filter_size:%s strides:%s padding:%s" %
+            "MeanPool2d %s: filter_size: %s strides: %s padding: %s" %
             (self.name, str(filter_size), str(strides), str(padding))
         )
@@ -279,7 +279,7 @@ def __init__(
         super(MaxPool3d, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "MaxPool3d %s: filter_size:%s strides:%s padding:%s" %
+            "MaxPool3d %s: filter_size: %s strides: %s padding: %s" %
             (self.name, str(filter_size), str(strides), str(padding))
         )
@@ -327,7 +327,7 @@ def __init__(
         super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "MeanPool3d %s: filter_size:%s strides:%s padding:%s" %
+            "MeanPool3d %s: filter_size: %s strides: %s padding: %s" %
             (self.name, str(filter_size), str(strides), str(padding))
         )
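The `act=act` additions in `ElementwiseLambdaLayer` and `BatchNormLayer` above forward the user's activation to the base initializer. A minimal sketch (hypothetical names, not TensorLayer source) of the failure mode they address:

```python
class BaseLayer(object):

    def __init__(self, act=None):
        self.act = act  # stays None unless the subclass forwards it


class BuggyLayer(BaseLayer):

    def __init__(self, act=None):
        super(BuggyLayer, self).__init__()  # `act` not forwarded: silently dropped


class FixedLayer(BaseLayer):

    def __init__(self, act=None):
        super(FixedLayer, self).__init__(act=act)  # activation preserved
```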
diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py
index 1eff2857c..18477563d 100644
--- a/tensorlayer/layers/recurrent.py
+++ b/tensorlayer/layers/recurrent.py
@@ -166,7 +166,7 @@ def __init__(
             logging.warning('pop state_is_tuple fails.')
 
         logging.info(
-            "RNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s " %
+            "RNNLayer %s: n_hidden: %d n_steps: %d in_dim: %d in_shape: %s cell_fn: %s " %
             (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)
         )
@@ -356,7 +356,7 @@ def __init__(
             raise Exception("Please put in cell_fn")
 
         logging.info(
-            "BiRNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d " % (
+            "BiRNNLayer %s: n_hidden: %d n_steps: %d in_dim: %d in_shape: %s cell_fn: %s dropout: %s n_layer: %d " % (
                 self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(),
                 cell_fn.__name__, dropout, n_layer
             )
         )
@@ -720,8 +720,8 @@ def __init__(
         super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, name=name)
 
         logging.info(
-            "ConvLSTMLayer %s: feature_map:%d, n_steps:%d, "
-            "in_dim:%d %s, cell_fn:%s " %
+            "ConvLSTMLayer %s: feature_map: %d, n_steps: %d, "
+            "in_dim: %d %s, cell_fn: %s " %
             (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(),
             cell_fn.__name__)
         )
         # You can get the dimension by .get_shape() or ._shape, and check the
@@ -1077,7 +1077,7 @@ def __init__(
             return_last = True
 
         logging.info(
-            "DynamicRNNLayer %s: n_hidden:%d, in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (
+            "DynamicRNNLayer %s: n_hidden: %d, in_dim: %d in_shape: %s cell_fn: %s dropout: %s n_layer: %d" % (
                 self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__,
                 dropout, n_layer
             )
         )
@@ -1330,7 +1330,7 @@ def __init__(
             raise Exception("Please put in cell_fn")
 
         logging.info(
-            "BiDynamicRNNLayer %s: n_hidden:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (
+            "BiDynamicRNNLayer %s: n_hidden: %d in_dim: %d in_shape: %s cell_fn: %s dropout: %s n_layer: %d" % (
                 self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__,
                 dropout, n_layer
             )
         )
@@ -1603,7 +1603,7 @@ def __init__(
             logging.warning("pop state_is_tuple fails.")
 
         logging.info(
-            "[*] Seq2Seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" %
+            "[*] Seq2Seq %s: n_hidden: %d cell_fn: %s dropout: %s n_layer: %d" %
             (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)
         )
diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py
index 8424b1f9d..c125999ac 100644
--- a/tensorlayer/layers/shape.py
+++ b/tensorlayer/layers/shape.py
@@ -119,7 +119,7 @@ def __init__(self, prev_layer, perm, name='transpose'):
 
         super(TransposeLayer, self).__init__(prev_layer=prev_layer, name=name)
 
-        logging.info("TransposeLayer %s: perm:%s" % (self.name, perm))
+        logging.info("TransposeLayer %s: perm: %s" % (self.name, perm))
 
         self.outputs = tf.transpose(self.inputs, perm=perm, name=name)
         self._add_layers(self.outputs)
diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py
index 52671ae70..5e71ad418 100644
--- a/tensorlayer/layers/spatial_transformer.py
+++ b/tensorlayer/layers/spatial_transformer.py
@@ -256,7 +256,7 @@ def __init__(
             out_size = [40, 40]
 
         logging.info(
-            "SpatialTransformer2dAffineLayer %s: in_size:%s out_size:%s" %
+            "SpatialTransformer2dAffineLayer %s: in_size: %s out_size: %s" %
             (self.name, self.inputs.get_shape().as_list(), out_size)
         )
diff --git a/tensorlayer/layers/special_activation.py b/tensorlayer/layers/special_activation.py
index f41332b13..ecac3c9ee 100644
--- a/tensorlayer/layers/special_activation.py
+++ b/tensorlayer/layers/special_activation.py
@@ -51,7 +51,7 @@ def __init__(
         else:
             w_shape = int(self.inputs.get_shape()[-1])
 
-        logging.info("PReluLayer %s: channel_shared:%s" % (self.name, channel_shared))
+        logging.info("PReluLayer %s: channel_shared: %s" % (self.name, channel_shared))
 
         # with tf.name_scope(name) as scope:
         with tf.variable_scope(name):
diff --git a/tensorlayer/layers/super_resolution.py b/tensorlayer/layers/super_resolution.py
index bfe9035ac..3bc8eec6b 100644
--- a/tensorlayer/layers/super_resolution.py
+++ b/tensorlayer/layers/super_resolution.py
@@ -84,7 +84,7 @@ def __init__(self, prev_layer, scale=2, n_out_channel=None, act=None, name='subp
 
         logging.info(
             "SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" %
-            (self.name, scale, n_out_channel, self.act.__name__ if self.act is not None else '- No Activation')
+            (self.name, scale, n_out_channel, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         with tf.variable_scope(name):
@@ -151,7 +151,7 @@ def __init__(self, prev_layer, scale=2, act=None, name='subpixel_conv1d'):
 
         logging.info(
             "SubpixelConv1d %s: scale: %d act: %s" %
-            (self.name, scale, self.act.__name__ if self.act is not None else '- No Activation')
+            (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation')
         )
 
         with tf.name_scope(name):
diff --git a/tensorlayer/layers/time_distribution.py b/tensorlayer/layers/time_distribution.py
index 3869cfea8..3bfc160f9 100644
--- a/tensorlayer/layers/time_distribution.py
+++ b/tensorlayer/layers/time_distribution.py
@@ -70,7 +70,7 @@ def __init__(
             self.inputs = tf.transpose(tf.stack(self.inputs), [1, 0, 2])
 
         logging.info(
-            "TimeDistributedLayer %s: layer_class:%s layer_args:%s" %
+            "TimeDistributedLayer %s: layer_class: %s layer_args: %s" %
             (self.name, layer_class.__name__, self.layer_args)
         )
diff --git a/tensorlayer/tl_logging.py b/tensorlayer/tl_logging.py
index 902829e58..973f457f9 100644
--- a/tensorlayer/tl_logging.py
+++ b/tensorlayer/tl_logging.py
@@ -218,7 +218,7 @@ def google2_log_prefix(level, timestamp=None, file_and_line=None):
     if level in _level_names:
         severity = _level_names[level][0]
 
-    s = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] ' % (
+    s = '%c%02d%02d %02d: %02d: %02d.%06d %5d %s: %d] ' % (
         severity,
         now_tuple[1],  # month
         now_tuple[2],  # day
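The new format string in the `google2_log_prefix` hunk above can be previewed with plain stdlib calls. A minimal sketch: the month/day fields match the visible comments, while the remaining field order (hour, minute, second, microseconds, pid, file, line) is an assumption based on the conventional glog-style prefix:

```python
import os
import time

def preview_log_prefix(severity='I', filename='network.py', line=42):
    # Mirrors the format string introduced in tl_logging.google2_log_prefix.
    now = time.time()
    now_tuple = time.localtime(now)
    now_microsecond = int(1e6 * (now % 1.0))
    return '%c%02d%02d %02d: %02d: %02d.%06d %5d %s: %d] ' % (
        severity,
        now_tuple[1],  # month
        now_tuple[2],  # day
        now_tuple[3],  # hour
        now_tuple[4],  # minute
        now_tuple[5],  # second
        now_microsecond,
        os.getpid(),
        filename,
        line,
    )

print(preview_log_prefix())  # e.g. "I0702 14: 30: 05.123456  4242 network.py: 42] "
```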