Commit 5580a7a

Update tests
1 parent c4c921f · commit 5580a7a

10 files changed (+710, -2 lines)


tests/layers/test_layers_linear.py

Lines changed: 2 additions & 2 deletions
@@ -74,8 +74,8 @@ def setUpClass(self):
         self.inputs_shape = [self.batch_size, 10]
 
         self.ni = tlx.nn.Input(self.inputs_shape, name='input_layer')
-        self.layer1 = tlx.nn.DropconnectLinear(out_features=5, keep=1.0)
-        self.layer2 = tlx.nn.DropconnectLinear(out_features=5, in_features=10, keep=0.01)
+        self.layer1 = tlx.nn.DropconnectLinear(out_features=5, keep=0.1)
+        self.layer2 = tlx.nn.DropconnectLinear(out_features=5, in_features=10, keep=0.99)
         self.n1 = self.layer1(self.ni)
         self.n2 = self.layer2(self.ni)

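The test update swaps the keep probabilities of the two drop-connect layers (from 1.0 to 0.1 and from 0.01 to 0.99). As an illustration (not part of the commit), a minimal standalone sketch of the same TensorLayerX usage pattern, mirroring the fixture above; the batch size of 8 is illustrative, and it assumes in_features is inferred on the first call when omitted, as the test itself relies on:

import tensorlayerx as tlx

# Dummy input matching the fixture's inputs_shape = [batch_size, 10]
ni = tlx.nn.Input([8, 10], name='input_layer')

# Same two layer configurations as in the updated test
layer1 = tlx.nn.DropconnectLinear(out_features=5, keep=0.1)                   # in_features inferred from the input (assumed)
layer2 = tlx.nn.DropconnectLinear(out_features=5, in_features=10, keep=0.99)  # in_features given explicitly

n1 = layer1(ni)
n2 = layer2(ni)
print(n1.shape, n2.shape)  # expected: (8, 5) (8, 5)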
tests/utils/__init__.py

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from tests.utils.custom_testcase import *
from tests.utils.list_py_files import *
from tests.utils.timeout_utils import *

from tests.utils.custom_layers import *
from tests.utils.custom_networks import *

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from tests.utils.custom_layers.basic_layers import *
from tests.utils.custom_layers.inception_blocks import *

Lines changed: 136 additions & 0 deletions
@@ -0,0 +1,136 @@
#! /usr/bin/python
# -*- coding: utf-8 -*-

import tensorflow as tf
import tensorlayer as tl

__all__ = [
    'activation_module',
    'conv_module',
    'dense_module',
]


def activation_module(layer, activation_fn, leaky_relu_alpha=0.2, name=None):

    act_name = name + "/activation" if name is not None else "activation"

    if activation_fn not in ["ReLU", "ReLU6", "Leaky_ReLU", "PReLU", "PReLU6", "PTReLU6", "CReLU", "ELU", "SELU",
                             "tanh", "sigmoid", "softmax", None]:
        raise Exception("Unknown 'activation_fn': %s" % activation_fn)

    elif activation_fn == "ReLU":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.relu, name=act_name)

    elif activation_fn == "ReLU6":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.relu6, name=act_name)

    elif activation_fn == "Leaky_ReLU":
        layer = tl.layers.LambdaLayer(
            prev_layer=layer, fn=tf.nn.leaky_relu, fn_args={'alpha': leaky_relu_alpha}, name=act_name
        )

    elif activation_fn == "PReLU":
        layer = tl.layers.PReluLayer(prev_layer=layer, channel_shared=False, name=act_name)

    elif activation_fn == "PReLU6":
        layer = tl.layers.PRelu6Layer(prev_layer=layer, channel_shared=False, name=act_name)

    elif activation_fn == "PTReLU6":
        layer = tl.layers.PTRelu6Layer(prev_layer=layer, channel_shared=False, name=act_name)

    elif activation_fn == "CReLU":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.crelu, name=act_name)

    elif activation_fn == "ELU":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.elu, name=act_name)

    elif activation_fn == "SELU":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.selu, name=act_name)

    elif activation_fn == "tanh":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.tanh, name=act_name)

    elif activation_fn == "sigmoid":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.sigmoid, name=act_name)

    elif activation_fn == "softmax":
        layer = tl.layers.LambdaLayer(prev_layer=layer, fn=tf.nn.softmax, name=act_name)

    return layer


def conv_module(
    prev_layer, n_out_channel, filter_size, strides, padding, is_train=True, use_batchnorm=True, activation_fn=None,
    conv_init=tl.initializers.random_uniform(),
    batch_norm_init=tl.initializers.truncated_normal(mean=1., stddev=0.02),
    bias_init=tf.zeros_initializer(), name=None
):

    if activation_fn not in ["ReLU", "ReLU6", "Leaky_ReLU", "PReLU", "PReLU6", "PTReLU6", "CReLU", "ELU", "SELU",
                             "tanh", "sigmoid", "softmax", None]:
        raise Exception("Unknown 'activation_fn': %s" % activation_fn)

    conv_name = 'conv2d' if name is None else name
    bn_name = 'batch_norm' if name is None else name + '/BatchNorm'

    layer = tl.layers.Conv2d(
        prev_layer,
        n_filter=n_out_channel,
        filter_size=filter_size,
        strides=strides,
        padding=padding,
        act=None,
        W_init=conv_init,
        b_init=None if use_batchnorm else bias_init,  # Not useful as the convolutions are batch normalized
        name=conv_name
    )

    if use_batchnorm:
        layer = tl.layers.BatchNormLayer(layer, act=None, is_train=is_train, gamma_init=batch_norm_init, name=bn_name)

    logits = layer.outputs

    layer = activation_module(layer, activation_fn, name=conv_name)

    return layer, logits


def dense_module(
    prev_layer, n_units, is_train, use_batchnorm=True, activation_fn=None,
    dense_init=tl.initializers.random_uniform(),
    batch_norm_init=tl.initializers.truncated_normal(mean=1., stddev=0.02),
    bias_init=tf.zeros_initializer(), name=None
):

    if activation_fn not in ["ReLU", "ReLU6", "Leaky_ReLU", "PReLU", "PReLU6", "PTReLU6", "CReLU", "ELU", "SELU",
                             "tanh", "sigmoid", "softmax", None]:
        raise Exception("Unknown 'activation_fn': %s" % activation_fn)

    # Flatten: Conv to FC
    if prev_layer.outputs.get_shape().__len__() != 2:  # The input dimension must be rank 2
        layer = tl.layers.FlattenLayer(prev_layer, name='flatten')
    else:
        layer = prev_layer

    layer = tl.layers.DenseLayer(
        layer,
        n_units=n_units,
        act=None,
        W_init=dense_init,
        b_init=None if use_batchnorm else bias_init,  # Not useful as the convolutions are batch normalized
        name='dense' if name is None else name
    )

    if use_batchnorm:
        layer = tl.layers.BatchNormLayer(
            layer, act=None, is_train=is_train, gamma_init=batch_norm_init, name='batch_norm'
        )

    logits = layer.outputs

    layer = activation_module(layer, activation_fn)

    return layer, logits

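To show how these helpers are meant to be composed, here is a minimal usage sketch that is not part of the commit: it assumes the TensorFlow 1.x / TensorLayer 1.x graph API that the module itself imports, assumes the helpers are importable from the tests.utils.custom_layers package (as the package __init__ above suggests), and the placeholder shape and block names are illustrative only.

import tensorflow as tf
import tensorlayer as tl

# Assumed import path, based on the wildcard imports in tests/utils/custom_layers/__init__ shown above
from tests.utils.custom_layers import conv_module, dense_module

# Hypothetical input pipeline: a batch of 32x32 RGB images fed through one
# conv block and one dense head built with the helpers above.
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
net = tl.layers.InputLayer(x, name='input')

# Conv2d -> BatchNorm -> ReLU, with the pre-activation tensor returned separately
net, conv_logits = conv_module(
    net, n_out_channel=16, filter_size=(3, 3), strides=(1, 1), padding='SAME',
    is_train=True, use_batchnorm=True, activation_fn='ReLU', name='block1'
)

# Flatten (if needed) -> Dense -> softmax; logits are the pre-softmax outputs
net, logits = dense_module(
    net, n_units=10, is_train=True, use_batchnorm=False, activation_fn='softmax', name='out'
)

Both helpers return the layer alongside its pre-activation outputs, so a loss such as softmax cross-entropy can be computed on logits while net carries the activated output forward.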