Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ To release a new version, please update the changelog as follows:
### Dependencies Update
- nltk>=3.3,<3.4 => nltk>=3.3,<3.5 (PR #892)
- pytest>=3.6,<3.11 => pytest>=3.6,<4.1 (PR #889)
- yapf>=0.22,<0.25 => yapf==0.25.0 (PR #896)

### Deprecated

Expand Down
2 changes: 1 addition & 1 deletion requirements/requirements_test.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@ pytest-cache>=1.0,<1.1
pytest-cov>=2.5,<2.7
pytest-xdist>=1.22,<1.25
sphinx>=1.7,<1.9
yapf>=0.22,<0.25
yapf==0.25.0
12 changes: 5 additions & 7 deletions tensorlayer/db.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,9 +219,8 @@ def find_top_model(self, sess, sort=None, model_name='model', **kwargs):

pc = self.db.Model.find(kwargs)
print(
"[Database] Find one model SUCCESS. kwargs:{} sort:{} save time:{} took: {}s".format(
kwargs, sort, _datetime, round(time.time() - s, 2)
)
"[Database] Find one model SUCCESS. kwargs:{} sort:{} save time:{} took: {}s".
format(kwargs, sort, _datetime, round(time.time() - s, 2))
)

    # put all information about the model into the TL layer
Expand Down Expand Up @@ -656,10 +655,9 @@ def run_top_task(self, task_name=None, sort=None, **kwargs):
}}, return_document=pymongo.ReturnDocument.AFTER
)
logging.info(
"[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s".format(
task_name, sort, _datetime,
time.time() - s
)
"[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s".
format(task_name, sort, _datetime,
time.time() - s)
)
return True
except Exception as e:
Expand Down
20 changes: 12 additions & 8 deletions tensorlayer/files/dataset_loaders/voc_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,8 +204,9 @@ def _recursive_parse_xml_to_dict(xml):
imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
logging.info("[VOC] {} images found".format(len(imgs_file_list)))

imgs_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.jpg --> 2007000027
imgs_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.jpg --> 2007000027

imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list]
# logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1])
Expand All @@ -215,17 +216,19 @@ def _recursive_parse_xml_to_dict(xml):
folder_semseg = os.path.join(path, extracted_filename, "SegmentationClass")
imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='\\.png', printable=False)
logging.info("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list)))
imgs_semseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_semseg_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]
# logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1])
##======== 3. instance segmentation maps path list
# folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/"
folder_insseg = os.path.join(path, extracted_filename, "SegmentationObject")
imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='\\.png', printable=False)
logging.info("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list)))
imgs_insseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_insseg_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]
# logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1])
else:
Expand All @@ -238,8 +241,9 @@ def _recursive_parse_xml_to_dict(xml):
logging.info(
"[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list))
)
imgs_ann_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.xml --> 2007000027
imgs_ann_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.xml --> 2007000027
imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]
# logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1])

Expand Down
20 changes: 12 additions & 8 deletions tensorlayer/files/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -1189,8 +1189,9 @@ def _recursive_parse_xml_to_dict(xml):
imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
logging.info("[VOC] {} images found".format(len(imgs_file_list)))

imgs_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.jpg --> 2007000027
imgs_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.jpg --> 2007000027

imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list]
# logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1])
Expand All @@ -1200,17 +1201,19 @@ def _recursive_parse_xml_to_dict(xml):
folder_semseg = os.path.join(path, extracted_filename, "SegmentationClass")
imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='\\.png', printable=False)
logging.info("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list)))
imgs_semseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_semseg_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]
# logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1])
# ======== 3. instance segmentation maps path list
# folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/"
folder_insseg = os.path.join(path, extracted_filename, "SegmentationObject")
imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='\\.png', printable=False)
logging.info("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list)))
imgs_insseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_insseg_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000032.png --> 2007000032
imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]
# logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1])
else:
Expand All @@ -1223,8 +1226,9 @@ def _recursive_parse_xml_to_dict(xml):
logging.info(
"[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list))
)
imgs_ann_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.xml --> 2007000027
imgs_ann_file_list.sort(
key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])
) # 2007_000027.xml --> 2007000027
imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]
# logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1])

Expand Down
5 changes: 3 additions & 2 deletions tensorlayer/layers/convolution/deformable_conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,8 +107,9 @@ def __init__(
input_h = int(self.inputs.get_shape()[1])
input_w = int(self.inputs.get_shape()[2])
kernel_n = shape[0] * shape[1]
initial_offsets = tf.stack(tf.meshgrid(tf.range(shape[0]), tf.range(shape[1]),
indexing='ij')) # initial_offsets --> (kh, kw, 2)
initial_offsets = tf.stack(
tf.meshgrid(tf.range(shape[0]), tf.range(shape[1]), indexing='ij')
) # initial_offsets --> (kh, kw, 2)
initial_offsets = tf.reshape(initial_offsets, (-1, 2)) # initial_offsets --> (n, 2)
initial_offsets = tf.expand_dims(initial_offsets, 0) # initial_offsets --> (1, n, 2)
initial_offsets = tf.expand_dims(initial_offsets, 0) # initial_offsets --> (1, 1, n, 2)
Expand Down
5 changes: 3 additions & 2 deletions tensorlayer/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -573,8 +573,9 @@ def exit_tensorflow(sess=None, port=6006):

elif _platform == "darwin":
tl.logging.info('OS X: %s' % text)
subprocess.Popen("lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill",
shell=True) # kill tensorboard
subprocess.Popen(
"lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True
) # kill tensorboard
elif _platform == "win32":
raise NotImplementedError("this function is not supported on the Windows platform")

Expand Down
5 changes: 3 additions & 2 deletions tensorlayer/visualize.py
Original file line number Diff line number Diff line change
Expand Up @@ -645,8 +645,9 @@ def draw_weights(W=None, second=10, saveable=True, shape=None, name='mnist', fig
# feature = np.zeros_like(feature)
# if np.mean(feature) < -0.015: # condition threshold
# feature = np.zeros_like(feature)
plt.imshow(np.reshape(feature, (shape[0], shape[1])), cmap='gray',
interpolation="nearest") # , vmin=np.min(feature), vmax=np.max(feature))
plt.imshow(
np.reshape(feature, (shape[0], shape[1])), cmap='gray', interpolation="nearest"
) # , vmin=np.min(feature), vmax=np.max(feature))
# plt.title(name)
# ------------------------------------------------------------
# plt.imshow(np.reshape(W[:,count-1] ,(np.sqrt(size),np.sqrt(size))), cmap='gray', interpolation="nearest")
Expand Down
5 changes: 3 additions & 2 deletions tests/test_tf_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,9 @@ def get_network_3d(inputs, reuse=False):
with tf.variable_scope("3D_network", reuse=reuse):
net = tl.layers.InputLayer(inputs)

net1 = tl.layers.Conv3dLayer(net, shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1),
name="Conv3dLayer") # 2 params
net1 = tl.layers.Conv3dLayer(
net, shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1), name="Conv3dLayer"
) # 2 params
net2 = tl.layers.DeConv3d(net1, name="DeConv3d") # 2 params
net3 = tl.layers.MaxPool3d(net2, (1, 1, 1), name="MaxPool3d") # 0 params
net4 = tl.layers.MeanPool3d(net3, (1, 1, 1), name="MeanPool3d") # 0 params
Expand Down