
Commit 14b6fd6

update to TensorLayer 1.8.1
1 parent 8ebb42f commit 14b6fd6

File tree

113 files changed: +14628 -8597 lines


.gitignore
+12
@@ -0,0 +1,12 @@
+*.gz
+*.npz
+*.pyc
+*~
+.DS_Store
+.idea
+.spyproject/
+build/
+dist
+docs/_build
+tensorlayer.egg-info
+tensorlayer/__pacache__

main.py
+2 -3
@@ -1,5 +1,4 @@
-import os, sys, pprint, time
-import scipy.misc
+import os, pprint, time
 import numpy as np
 import tensorflow as tf
 import tensorlayer as tl
@@ -111,7 +110,7 @@ def main(_):
         ## load image data
         batch_idxs = min(len(data_files), FLAGS.train_size) // FLAGS.batch_size
 
-        for idx in xrange(0, batch_idxs):
+        for idx in range(0, batch_idxs):
             batch_files = data_files[idx*FLAGS.batch_size:(idx+1)*FLAGS.batch_size]
             ## get real images
             # more image augmentation functions in http://tensorlayer.readthedocs.io/en/latest/modules/prepro.html
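
Note: the xrange-to-range change above is what makes this loop Python 3 compatible. A minimal standalone sketch of the same batch-slicing pattern (batch_size, train_size and data_files below are stand-ins for FLAGS.batch_size, FLAGS.train_size and the loaded file list, not names taken from this commit):

>>> batch_size = 64                                         # stand-in for FLAGS.batch_size
>>> data_files = ['im_%05d.jpg' % i for i in range(1000)]   # stand-in file list
>>> batch_idxs = min(len(data_files), 500) // batch_size    # 500 stands in for FLAGS.train_size
>>> for idx in range(0, batch_idxs):
...     batch_files = data_files[idx * batch_size:(idx + 1) * batch_size]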

tensorlayer/__init__.py
+8 -10
@@ -1,8 +1,5 @@
-"""
-Deep learning and Reinforcement learning library for Researchers and Engineers
-"""
-# from __future__ import absolute_import
-
+"""Deep learning and Reinforcement learning library for Researchers and Engineers"""
+from __future__ import absolute_import
 
 try:
     install_instr = "Please make sure you install a recent enough version of TensorFlow."
@@ -11,21 +8,22 @@
     raise ImportError("__init__.py : Could not import TensorFlow." + install_instr)
 
 from . import activation
-act = activation
 from . import cost
 from . import files
-# from . import init
 from . import iterate
 from . import layers
-from . import ops
 from . import utils
 from . import visualize
-from . import prepro # was preprocesse
+from . import prepro
 from . import nlp
 from . import rein
+from . import distributed
 
+# alias
+act = activation
+vis = visualize
 
-__version__ = "1.4.5"
+__version__ = "1.8.1"
 
 global_flag = {}
 global_dict = {}
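
A quick way to confirm the import changes once this version is installed; this is a minimal sketch based only on the aliases and version string added above:

>>> import tensorlayer as tl
>>> tl.__version__
'1.8.1'
>>> tl.act is tl.activation   # `act` is now defined as an alias after the imports
True
>>> tl.vis is tl.visualize    # new `vis` alias
True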
(A number of binary files also changed in this commit; their contents are not shown in the diff.)

tensorlayer/_logging.py
+16
@@ -0,0 +1,16 @@
+import logging as _logger
+
+logging = _logger.getLogger('tensorlayer')
+logging.setLevel(_logger.INFO)
+_hander = _logger.StreamHandler()
+formatter = _logger.Formatter('[TL] %(message)s')
+_hander.setFormatter(formatter)
+logging.addHandler(_hander)
+
+
+def info(fmt, *args):
+    logging.info(fmt, *args)
+
+
+def warning(fmt, *args):
+    logging.warning(fmt, *args)
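
The new internal logger prefixes every message with [TL]. A minimal usage sketch of the info/warning helpers defined above (the message text is illustrative, not taken from this commit):

>>> from tensorlayer import _logging as logging
>>> logging.info("Loading %s", "model.npz")        # logs: [TL] Loading model.npz
>>> logging.warning("file %s not found", "w.npz")  # logs: [TL] file w.npz not found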

tensorlayer/activation.py
+104 -59
@@ -1,109 +1,154 @@
 #! /usr/bin/python
-# -*- coding: utf8 -*-
+# -*- coding: utf-8 -*-
 
+import tensorflow as tf
+from tensorflow.python.util.deprecation import deprecated
 
+__all__ = [
+    'identity',
+    'ramp',
+    'leaky_relu',
+    'swish',
+    'pixel_wise_softmax',
+    'linear',
+    'lrelu',
+]
 
-import tensorflow as tf
 
-def identity(x, name=None):
-    """The identity activation function, Shortcut is ``linear``.
+@deprecated("2018-06-30", "This API will be deprecated soon as tf.identity can do the same thing.")
+def identity(x):
+    """The identity activation function.
+    Shortcut is ``linear``.
 
     Parameters
     ----------
-    x : a tensor input
-        input(s)
-
+    x : Tensor
+        input.
 
     Returns
-    --------
-    A `Tensor` with the same type as `x`.
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
     """
     return x
 
-# Shortcut
-linear = identity
 
-def ramp(x=None, v_min=0, v_max=1, name=None):
+def ramp(x, v_min=0, v_max=1, name=None):
     """The ramp activation function.
 
     Parameters
     ----------
-    x : a tensor input
-        input(s)
+    x : Tensor
+        input.
     v_min : float
-        if input(s) smaller than v_min, change inputs to v_min
+        cap input to v_min as a lower bound.
     v_max : float
-        if input(s) greater than v_max, change inputs to v_max
-    name : a string or None
-        An optional name to attach to this activation function.
-
+        cap input to v_max as a upper bound.
+    name : str
+        The function name (optional).
 
     Returns
-    --------
-    A `Tensor` with the same type as `x`.
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
     """
     return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)
 
-def leaky_relu(x=None, alpha=0.1, name="LeakyReLU"):
+
+def leaky_relu(x, alpha=0.1, name="lrelu"):
     """The LeakyReLU, Shortcut is ``lrelu``.
 
-    Modified version of ReLU, introducing a nonzero gradient for negative
-    input.
+    Modified version of ReLU, introducing a nonzero gradient for negative input.
 
     Parameters
     ----------
-    x : A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
-        `int16`, or `int8`.
-    alpha : `float`. slope.
-    name : a string or None
-        An optional name to attach to this activation function.
+    x : Tensor
+        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``,
+        ``int16``, or ``int8``.
+    alpha : float
+        Slope.
+    name : str
+        The function name (optional).
 
     Examples
-    ---------
-    >>> network = tl.layers.DenseLayer(network, n_units=100, name = 'dense_lrelu',
-    ...                 act= lambda x : tl.act.lrelu(x, 0.2))
+    --------
+    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
 
     References
     ------------
-    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) <http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf>`_
+    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) <http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf>`__
+
     """
-    with tf.name_scope(name) as scope:
-        # x = tf.nn.relu(x)
-        # m_x = tf.nn.relu(-x)
-        # x -= alpha * m_x
-        x = tf.maximum(x, alpha * x)
+    # with tf.name_scope(name) as scope:
+    #     x = tf.nn.relu(x)
+    #     m_x = tf.nn.relu(-x)
+    #     x -= alpha * m_x
+    x = tf.maximum(x, alpha * x, name=name)
     return x
 
-#Shortcut
-lrelu = leaky_relu
 
-def pixel_wise_softmax(output, name='pixel_wise_softmax'):
+def swish(x, name='swish'):
+    """The Swish function.
+    See `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`__.
+
+    Parameters
+    ----------
+    x : Tensor
+        input.
+    name: str
+        function name (optional).
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
+
+    """
+    with tf.name_scope(name):
+        x = tf.nn.sigmoid(x) * x
+    return x
+
+
+@deprecated("2018-06-30", "This API will be deprecated soon as tf.nn.softmax can do the same thing.")
+def pixel_wise_softmax(x, name='pixel_wise_softmax'):
     """Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1.
     Usually be used for image segmentation.
 
     Parameters
-    ------------
-    output : tensor
-        - For 2d image, 4D tensor [batch_size, height, weight, channel], channel >= 2.
-        - For 3d image, 5D tensor [batch_size, depth, height, weight, channel], channel >= 2.
+    ----------
+    x : Tensor
+        input.
+        - For 2d image, 4D tensor (batch_size, height, weight, channel), where channel >= 2.
+        - For 3d image, 5D tensor (batch_size, depth, height, weight, channel), where channel >= 2.
+    name : str
+        function name (optional)
+
+    Returns
+    -------
+    Tensor
+        A ``Tensor`` in the same type as ``x``.
 
     Examples
-    ---------
+    --------
     >>> outputs = pixel_wise_softmax(network.outputs)
     >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)
 
     References
-    -----------
-    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`_
+    ----------
+    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`__
+
     """
-    with tf.name_scope(name) as scope:
-        return tf.nn.softmax(output)
-    ## old implementation
-    # exp_map = tf.exp(output)
-    # if output.get_shape().ndims == 4:   # 2d image
-    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, True]))
-    # elif output.get_shape().ndims == 5: # 3d image
-    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, False, True]))
-    # else:
-    #     raise Exception("output parameters should be 2d or 3d image, not %s" % str(output._shape))
-    # return tf.div(exp_map, evidence)
+    with tf.name_scope(name):
+        return tf.nn.softmax(x)
+
+
+# Alias
+linear = identity
+lrelu = leaky_relu
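
The new swish activation and the linear/lrelu aliases can be passed to layers like any other activation. A minimal sketch, assuming the usual TensorLayer 1.8 layer API; the placeholder shape and layer names below are illustrative, not from this commit:

>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, shape=[None, 100])
>>> net = tl.layers.InputLayer(x, name='input')
>>> net = tl.layers.DenseLayer(net, 100, act=tl.act.swish, name='dense_swish')
>>> net = tl.layers.DenseLayer(net, 100, act=lambda v: tl.act.lrelu(v, 0.2), name='dense_lrelu')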

tensorlayer/cli/__init__.py
+1
@@ -0,0 +1 @@
+"""The tensorlayer.cli module provides a command-line tool for some common tasks."""

tensorlayer/cli/__main__.py
+14
@@ -0,0 +1,14 @@
+import argparse
+
+from tensorlayer.cli import train
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(prog='tl')
+    subparsers = parser.add_subparsers(dest='cmd')
+    train_parser = subparsers.add_parser('train', help='train a model using multiple local GPUs or CPUs.')
+    train.build_arg_parser(train_parser)
+    args = parser.parse_args()
+    if args.cmd == 'train':
+        train.main(args)
+    else:
+        parser.print_help()
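
Given the argparse wiring above, running `python -m tensorlayer.cli` with no subcommand falls through to parser.print_help(), while `python -m tensorlayer.cli train ...` dispatches to train.main(args); the training flags themselves come from tensorlayer/cli/train.py, which appears in this diff only as an import.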
