2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -75,6 +75,8 @@ To release a new version, please update the changelog as followed:

### Changed
- remove `tl.layers.initialize_global_variables(sess)` (PR #931)
- change `tl.layers.core`, `tl.models.core` (PR #966)
- change `weights` into `all_weights`, `trainable_weights`, `nontrainable_weights`

### Dependencies Update
- nltk>=3.3,<3.4 => nltk>=3.3,<3.5 (PR #892)
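For user code, the rename is mechanical. A minimal migration sketch (the two-layer model below is illustrative, not taken from this PR):

```python
import tensorlayer as tl

ni = tl.layers.Input([None, 100])
nn = tl.layers.Dense(n_units=10, name='out')(ni)
model = tl.models.Model(inputs=ni, outputs=nn)

# before this PR: model.weights
all_params = model.all_weights             # every variable of the model
train_params = model.trainable_weights     # variables an optimizer should update
fixed_params = model.nontrainable_weights  # the rest, e.g. BatchNorm moving statistics
```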
8 changes: 4 additions & 4 deletions docs/modules/files.rst
@@ -142,14 +142,14 @@ sake of cross-platform. Other file formats such as ``.npz`` are also available.
.. code-block:: python

## save model as .h5
tl.files.save_weights_to_hdf5('model.h5', network.weights)
tl.files.save_weights_to_hdf5('model.h5', network.all_weights)
# restore model from .h5 (in order)
tl.files.load_hdf5_to_weights_in_order('model.h5', network.weights)
tl.files.load_hdf5_to_weights_in_order('model.h5', network.all_weights)
# restore model from .h5 (by name)
tl.files.load_hdf5_to_weights('model.h5', network.weights)
tl.files.load_hdf5_to_weights('model.h5', network.all_weights)

## save model as .npz
tl.files.save_npz(network.weights , name='model.npz')
tl.files.save_npz(network.all_weights , name='model.npz')
# restore model from .npz (method 1)
load_params = tl.files.load_npz(name='model.npz')
tl.files.assign_weights(sess, load_params, network)
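The blocks above restore `.h5` and `.npz` weights in order or by name. A name-keyed `.npz` variant is sketched below, assuming `tl.files.save_npz_dict` takes the same `save_list`/`name` arguments as `save_npz` (`load_and_assign_npz_dict` and its `skip` flag appear later in this diff); the model is illustrative:

```python
import tensorlayer as tl

ni = tl.layers.Input([None, 100])
nn = tl.layers.Dense(n_units=10, name='dense1')(ni)
network = tl.models.Model(inputs=ni, outputs=nn)

# save every variable keyed by its name
tl.files.save_npz_dict(network.all_weights, name='model_dict.npz')
# restore by name; skip=True ignores entries the network does not have
tl.files.load_and_assign_npz_dict(name='model_dict.npz', network=network, skip=True)
```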
6 changes: 3 additions & 3 deletions docs/user/faq.rst
@@ -46,19 +46,19 @@ To choose which variables to update, you can do as below.

.. code-block:: python

train_params = network.weights[3:]
train_params = network.trainable_weights[3:]

The second way is to get the variables by a given name. For example, if you want to get all variables which the layer name contain ``dense``, you can do as below.

.. code-block:: python

train_params = network.get_layer('dense').weights
train_params = network.get_layer('dense').trainable_weights

After you get the variable list, you can define your optimizer like that so as to update only a part of the variables.

.. code-block:: python

train_weights = network.weights
train_weights = network.trainable_weights
optimizer.apply_gradients(zip(grad, train_weights))

Logging
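Putting the renamed attribute into a full training step, a minimal sketch (layer names, sizes, and the dummy batch are illustrative only):

```python
import numpy as np
import tensorflow as tf
import tensorlayer as tl

ni = tl.layers.Input([None, 784])
nn = tl.layers.Dense(n_units=800, act=tf.nn.relu, name='dense1')(ni)
nn = tl.layers.Dense(n_units=10, name='dense2')(nn)
network = tl.models.Model(inputs=ni, outputs=nn)
network.train()  # switch the model to training mode

train_weights = network.get_layer('dense2').trainable_weights  # update only the last layer
optimizer = tf.optimizers.Adam(learning_rate=0.0001)

x = np.random.random([32, 784]).astype(np.float32)        # dummy batch
y = np.random.randint(0, 10, size=[32]).astype(np.int64)  # dummy labels

with tf.GradientTape() as tape:
    logits = network(x)
    loss = tl.cost.cross_entropy(logits, y, name='train_loss')
grad = tape.gradient(loss, train_weights)
optimizer.apply_gradients(zip(grad, train_weights))
```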
2 changes: 1 addition & 1 deletion docs/user/get_start_advance.rst
@@ -36,7 +36,7 @@ Get a part of CNN
nn = tl.layers.Dense(n_units=100, name='out')(nn)
model = tl.models.Model(inputs=ni, outputs=nn)
# train your own classifier (only update the last layer)
train_params = model.get_layer('out').weights
train_params = model.get_layer('out').all_weights

Reuse CNN
------------------
6 changes: 3 additions & 3 deletions docs/user/get_start_model.rst
@@ -149,11 +149,11 @@ We can get the specific weights by indexing or naming.
.. code-block:: python

# indexing
all_weights = MLP.weights
some_weights = MLP.weights[1:3]
all_weights = MLP.all_weights
some_weights = MLP.all_weights[1:3]

# naming
some_weights = MLP.get_layer('dense1').weights
some_weights = MLP.get_layer('dense1').all_weights


Save and restore model
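Continuing that pattern, a short sketch of how the three lists introduced by this PR relate (the `MLP` below is a stand-in for the model built earlier in the guide; that `all_weights` covers both other lists is assumed from the changelog entry):

```python
import tensorflow as tf
import tensorlayer as tl

ni = tl.layers.Input([None, 784])
nn = tl.layers.Dense(n_units=800, act=tf.nn.relu, name='dense1')(ni)
nn = tl.layers.Dense(n_units=10, name='dense2')(nn)
MLP = tl.models.Model(inputs=ni, outputs=nn)

# all_weights should cover both the trainable and the non-trainable variables
print(len(MLP.all_weights), len(MLP.trainable_weights), len(MLP.nontrainable_weights))

# indexing and naming work on the renamed lists as before
some_weights = MLP.trainable_weights[1:3]
dense1_weights = MLP.get_layer('dense1').trainable_weights
print([w.name for w in some_weights + dense1_weights])
```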
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_cifar10_cnn_static.py
@@ -87,7 +87,7 @@ def get_model_batchnorm(inputs_shape):
# learning_rate_decay_factor = 0.1
# num_epoch_decay = 350

train_weights = net.weights
train_weights = net.trainable_weights
# learning_rate = tf.Variable(init_learning_rate)
optimizer = tf.optimizers.Adam(learning_rate)

2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py
@@ -46,7 +46,7 @@ def forward(self, x, foo=None):
n_epoch = 500
batch_size = 500
print_freq = 5
train_weights = MLP.weights
train_weights = MLP.trainable_weights
optimizer = tf.optimizers.Adam(learning_rate=0.0001)

## the following code can help you understand SGD deeply
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mnist_mlp_dynamic_2.py
@@ -65,7 +65,7 @@ def forward(self, x, foo=None):
n_epoch = 500
batch_size = 500
print_freq = 5
train_weights = MLP1.weights + MLP2.weights
train_weights = MLP1.trainable_weights + MLP2.trainable_weights
optimizer = tf.optimizers.Adam(learning_rate=0.0001)

## the following code can help you understand SGD deeply
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mnist_mlp_static.py
@@ -37,7 +37,7 @@ def get_model(inputs_shape):
n_epoch = 500
batch_size = 500
print_freq = 5
train_weights = MLP.weights
train_weights = MLP.trainable_weights
optimizer = tf.optimizers.Adam(lr=0.0001)

## the following code can help you understand SGD deeply
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mnist_mlp_static_2.py
@@ -46,7 +46,7 @@ def get_model(inputs_shape, hmodel):
n_epoch = 500
batch_size = 500
print_freq = 5
train_weights = MLP.weights
train_weights = MLP.trainable_weights
optimizer = tf.optimizers.Adam(lr=0.0001)

## the following code can help you understand SGD deeply
2 changes: 1 addition & 1 deletion examples/basic_tutorials/tutorial_mnist_siamese.py
@@ -96,7 +96,7 @@ def create_pairs(x, digit_indices):

# training settings
print_freq = 5
train_weights = model.weights
train_weights = model.trainable_weights
optimizer = tf.optimizers.RMSprop()


2 changes: 1 addition & 1 deletion examples/keras_tfslim/tutorial_keras.py
@@ -38,7 +38,7 @@
n_epoch = 200
learning_rate = 0.0001

train_params = network.weights
train_params = network.trainable_weights
optimizer = tf.optimizers.Adam(learning_rate)

for epoch in range(n_epoch):
2 changes: 1 addition & 1 deletion examples/reinforcement_learning/tutorial_atari_pong.py
@@ -84,7 +84,7 @@ def get_model(inputs_shape):
M = tl.models.Model(inputs=ni, outputs=nn, name="mlp")
return M
model = get_model([None, D])
train_weights = model.weights
train_weights = model.trainable_weights
# probs = model(t_states, is_train=True).outputs
# sampling_prob = tf.nn.softmax(probs)

8 changes: 4 additions & 4 deletions examples/reinforcement_learning/tutorial_cartpole_ac.py
@@ -122,8 +122,8 @@ def learn(self, s, a, td):
_logits = self.model([s]).outputs
# _probs = tf.nn.softmax(_logits)
_exp_v = tl.rein.cross_entropy_reward_loss(logits=_logits, actions=[a], rewards=td[0])
grad = tape.gradient(_exp_v, self.model.weights)
self.optimizer.apply_gradients(zip(grad, self.model.weights))
grad = tape.gradient(_exp_v, self.model.trainable_weights)
self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights))
return _exp_v

def choose_action(self, s):
@@ -178,8 +178,8 @@ def learn(self, s, r, s_):
# TD_error = r + lambd * V(newS) - V(S)
td_error = r + LAMBDA * v_ - v
loss = tf.square(td_error)
grad = tape.gradient(loss, self.model.weights)
self.optimizer.apply_gradients(zip(grad, self.model.weights))
grad = tape.gradient(loss, self.model.trainable_weights)
self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights))

return td_error

2 changes: 1 addition & 1 deletion examples/reinforcement_learning/tutorial_frozenlake_dqn.py
@@ -63,7 +63,7 @@ def get_model(inputs_shape):
return tl.models.Model(inputs=ni, outputs=nn, name="Q-Network")
qnetwork = get_model([1, 16])
qnetwork.train()
train_weights = qnetwork.weights
train_weights = qnetwork.trainable_weights

# chose action greedily with reward. in Q-Learning, policy is greedy, so we use "max" to select the next action.
# predict = tf.argmax(y, 1)
@@ -87,7 +87,7 @@ def forward(self, inputs):
learning_rate = 0.0001
print_freq = 10
batch_size = 64
train_weights = net.weights
train_weights = net.trainable_weights
optimizer = tf.optimizers.Adam(lr=learning_rate)

##================== TRAINING ================================================##
@@ -84,7 +84,7 @@ def get_model(inputs_shape):
learning_rate = 0.0001
print_freq = 10
batch_size = 64
train_weights = net.weights
train_weights = net.trainable_weights
optimizer = tf.optimizers.Adam(lr=learning_rate)

##================== TRAINING ================================================##
4 changes: 2 additions & 2 deletions examples/text_classification/tutorial_imdb_fasttext.py
@@ -138,8 +138,8 @@ def train_test_and_save_model():
cost = tl.cost.cross_entropy(y_pred, y_batch, name='cost')

# backward, calculate gradients and update the weights
grad = tape.gradient(cost, model.weights)
optimizer.apply_gradients(zip(grad, model.weights))
grad = tape.gradient(cost, model.trainable_weights)
optimizer.apply_gradients(zip(grad, model.trainable_weights))

# calculate the accuracy
predictions = tf.argmax(y_pred, axis=1, output_type=tf.int32)
2 changes: 1 addition & 1 deletion examples/text_generation/tutorial_generate_text.py
@@ -289,7 +289,7 @@ def loss_fn(outputs, targets, batch_size, sequence_length):
# tvars = network.all_params $ all parameters
# tvars = network.all_params[1:] $ parameters except embedding matrix
# Train the whole network.
tvars = rnn_model.weights
tvars = rnn_model.trainable_weights
# grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
# optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = tf.train.GradientDescentOptimizer(lr).minimize(cost, var_list=tvars)
4 changes: 2 additions & 2 deletions examples/text_word_embedding/tutorial_word2vec_basic.py
@@ -240,8 +240,8 @@ def main_word2vec_basic():
with tf.GradientTape() as tape:
outputs, nce_cost = model([batch_inputs, batch_labels])

grad = tape.gradient(nce_cost, model.weights)
optimizer.apply_gradients(zip(grad, model.weights))
grad = tape.gradient(nce_cost, model.trainable_weights)
optimizer.apply_gradients(zip(grad, model.trainable_weights))

average_loss += nce_cost

2 changes: 1 addition & 1 deletion tensorlayer/db.py
@@ -148,7 +148,7 @@ def save_model(self, network=None, model_name='model', **kwargs):
self._fill_project_info(kwargs) # put project_name into kwargs

# params = network.get_all_params()
params = network.weights
params = network.all_weights

s = time.time()

18 changes: 9 additions & 9 deletions tensorlayer/files/utils.py
@@ -1907,7 +1907,7 @@ def save_npz(save_list=None, name='model.npz'):
--------
Save model to npz

>>> tl.files.save_npz(network.weights, name='model.npz')
>>> tl.files.save_npz(network.all_weights, name='model.npz')

Load model from npz (Method 1)

@@ -1993,7 +1993,7 @@ def assign_weights(weights, network):
"""
ops = []
for idx, param in enumerate(weights):
ops.append(network.weights[idx].assign(param))
ops.append(network.all_weights[idx].assign(param))
return ops


@@ -2073,7 +2073,7 @@ def load_and_assign_npz_dict(name='model.npz', network=None, skip=False):
if len(weights.keys()) != len(set(weights.keys())):
raise Exception("Duplication in model npz_dict %s" % name)

net_weights_name = [w.name for w in network.weights]
net_weights_name = [w.name for w in network.all_weights]

for key in weights.keys():
if key not in net_weights_name:
@@ -2085,7 +2085,7 @@ def load_and_assign_npz_dict(name='model.npz', network=None, skip=False):
"if you want to skip redundant or mismatch weights." % key
)
else:
assign_tf_variable(network.weights[net_weights_name.index(key)], weights[key])
assign_tf_variable(network.all_weights[net_weights_name.index(key)], weights[key])
logging.info("[*] Model restored from npz_dict %s" % name)


@@ -2549,9 +2549,9 @@ def _save_weights_to_hdf5_group(f, layers):
elif isinstance(layer, tl.layers.LayerList):
_save_weights_to_hdf5_group(g, layer.layers)
elif isinstance(layer, tl.layers.Layer):
if layer.weights is not None:
weight_values = tf_variables_to_numpy(layer.weights)
weight_names = [w.name.encode('utf8') for w in layer.weights]
if layer.all_weights is not None:
weight_values = tf_variables_to_numpy(layer.all_weights)
weight_names = [w.name.encode('utf8') for w in layer.all_weights]
else:
weight_values = []
weight_names = []
@@ -2593,7 +2593,7 @@ def _load_weights_from_hdf5_group_in_order(f, layers):
elif isinstance(layer, tl.layers.Layer):
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
for iid, w_name in enumerate(weight_names):
assign_tf_variable(layer.weights[iid], np.asarray(g[w_name]))
assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name]))
else:
raise Exception("Only layer or model can be saved into hdf5.")
if idx == len(layers) - 1:
@@ -2639,7 +2639,7 @@ def _load_weights_from_hdf5_group(f, layers, skip=False):
elif isinstance(layer, tl.layers.Layer):
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
for iid, w_name in enumerate(weight_names):
assign_tf_variable(layer.weights[iid], np.asarray(g[w_name]))
assign_tf_variable(layer.all_weights[iid], np.asarray(g[w_name]))
else:
raise Exception("Only layer or model can be saved into hdf5.")

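A round-trip sketch of the npz helpers with the signatures shown in this diff (`save_npz`, `load_npz`, and the session-free `assign_weights(weights, network)`); the model is illustrative:

```python
import tensorlayer as tl

ni = tl.layers.Input([None, 32])
nn = tl.layers.Dense(n_units=8, name='dense1')(ni)
network = tl.models.Model(inputs=ni, outputs=nn)

tl.files.save_npz(network.all_weights, name='model.npz')  # save all variables, in order
load_params = tl.files.load_npz(name='model.npz')         # a list of numpy arrays
tl.files.assign_weights(load_params, network)              # assign back in the same order
```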
4 changes: 2 additions & 2 deletions tensorlayer/layers/convolution/separable_conv.py
@@ -156,7 +156,7 @@ def build(self, inputs_shape):
) # initialize weights
outputs_shape = _out.shape
# self._add_weights(self.layer.weights)
self._weights = self.layer.weights
self._trainable_weights = self.layer.weights

def forward(self, inputs):
outputs = self.layer(inputs)
@@ -302,7 +302,7 @@ def build(self, inputs_shape):
tf.convert_to_tensor(np.random.uniform(size=list(inputs_shape)), dtype=np.float)
) # initialize weights
outputs_shape = _out.shape
self._weights = self.layer.weights
self._trainable_weights = self.layer.weights

def forward(self, inputs):
outputs = self.layer(inputs)
4 changes: 2 additions & 2 deletions tensorlayer/layers/convolution/simplified_deconv.py
@@ -141,7 +141,7 @@ def build(self, inputs_shape):
tf.convert_to_tensor(np.random.uniform(size=inputs_shape), dtype=np.float32)
) #np.random.uniform([1] + list(inputs_shape))) # initialize weights
outputs_shape = _out.shape
self._weights = self.layer.weights
self._trainable_weights = self.layer.weights

def forward(self, inputs):
outputs = self.layer(inputs)
@@ -264,7 +264,7 @@ def build(self, inputs_shape):
) #self.layer(np.random.uniform([1] + list(inputs_shape))) # initialize weights
outputs_shape = _out.shape
# self._add_weights(self.layer.weights)
self._weights = self.layer.weights
self._trainable_weights = self.layer.weights

def forward(self, inputs):
outputs = self.layer(inputs)