
Commit e255434: update docs
Parent: 5580a7a

31 files changed (+474, -1015 lines)

examples/basic_tutorials/automatic_inference_input_shape.py

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ def __len__(self):
 batch_size = 500
 print_freq = 5
 train_weights = MLP.trainable_weights
-optimizer = tlx.optimizers.Adam(learning_rate=0.0001)
+optimizer = tlx.optimizers.Adam(0.0001)
 train_dataset = mnistdataset(data=X_train, label=y_train)
 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 val_dataset = mnistdataset(data=X_val, label=y_val)
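
Note: the optimizer above now receives the learning rate positionally; other tutorials touched by this commit pass it via the lr keyword instead. A minimal sketch of both forms (assuming TensorLayerX's Adam accepts the rate as its first argument, as this hunk indicates):

    import tensorlayerx as tlx

    # learning rate passed positionally (as in this tutorial after the change)
    optimizer = tlx.optimizers.Adam(0.0001)
    # equivalent keyword form used by other tutorials in this commit
    optimizer = tlx.optimizers.Adam(lr=0.0001)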

examples/basic_tutorials/cifar10_cnn.py

Lines changed: 2 additions & 2 deletions
@@ -3,9 +3,9 @@

 import os
 # os.environ['TL_BACKEND'] = 'paddle'
-os.environ['TL_BACKEND'] = 'tensorflow'
+# os.environ['TL_BACKEND'] = 'tensorflow'
 # os.environ['TL_BACKEND'] = 'mindspore'
-# os.environ['TL_BACKEND'] = 'torch'
+os.environ['TL_BACKEND'] = 'torch'

 import time
 from tensorlayerx.dataflow import Dataset, DataLoader

examples/basic_tutorials/gradient_clip_mixed_tensorflow.py

Lines changed: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ def __len__(self):
 train_weights = MLP.trainable_weights
 train_dataset = mnistdataset(data=X_train, label=y_train)
 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
-optimizer = tlx.optimizers.Adam(learning_rate=0.0001, weight_decay= 0.001, grad_clip=tlx.ops.ClipGradByValue())
+optimizer = tlx.optimizers.Adam(lr=0.0001, weight_decay= 0.001, grad_clip=tlx.ops.ClipGradByValue())

 net_with_loss = tlx.model.WithLoss(backbone=MLP, loss_fn=tlx.losses.softmax_cross_entropy_with_logits)
 net_with_grad_train = tlx.model.TrainOneStep(net_with_loss, optimizer, train_weights)
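
A sketch of the updated training setup in this tutorial, combining the renamed lr argument with gradient clipping (names taken from the hunk; MLP, the loss, and the dataset are defined earlier in the example):

    import tensorlayerx as tlx

    # clipping and weight decay are configured on the optimizer itself
    optimizer = tlx.optimizers.Adam(lr=0.0001, weight_decay=0.001,
                                    grad_clip=tlx.ops.ClipGradByValue())
    net_with_loss = tlx.model.WithLoss(backbone=MLP,
                                       loss_fn=tlx.losses.softmax_cross_entropy_with_logits)
    train_one_step = tlx.model.TrainOneStep(net_with_loss, optimizer, MLP.trainable_weights)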

examples/basic_tutorials/imdb_LSTM_simple.py

Lines changed: 3 additions & 3 deletions
@@ -5,8 +5,8 @@
 import os
 # os.environ['TL_BACKEND'] = 'tensorflow'
 # os.environ['TL_BACKEND'] = 'mindspore'
-# os.environ['TL_BACKEND'] = 'paddle'
-os.environ['TL_BACKEND'] = 'torch'
+os.environ['TL_BACKEND'] = 'paddle'
+# os.environ['TL_BACKEND'] = 'torch'
 import tensorlayerx as tlx
 from tensorlayerx.nn import Module
 from tensorlayerx.nn import Linear, LSTM, Embedding
@@ -41,7 +41,7 @@ class ImdbNet(Module):

     def __init__(self):
         super(ImdbNet, self).__init__()
-        self.embedding = Embedding(vocabulary_size=vocab_size, embedding_size=64)
+        self.embedding = Embedding(num_embeddings=vocab_size, embedding_dim=64)
         self.lstm = LSTM(input_size=64, hidden_size=64)
         self.linear1 = Linear(in_features=64, out_features=64, act=tlx.ReLU)
         self.linear2 = Linear(in_features=64, out_features=2)
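
The Embedding layer now takes num_embeddings/embedding_dim (matching PyTorch's naming) instead of vocabulary_size/embedding_size. A minimal sketch of the new call, with an illustrative vocab_size (the tutorial defines its own value):

    from tensorlayerx.nn import Embedding

    vocab_size = 20000  # illustrative; the tutorial sets its own value
    # old: Embedding(vocabulary_size=vocab_size, embedding_size=64)
    embedding = Embedding(num_embeddings=vocab_size, embedding_dim=64)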

examples/basic_tutorials/mnist_gan.py

Lines changed: 2 additions & 2 deletions
@@ -109,8 +109,8 @@ def forward(self, real_data, g_data):
 # loss_fn = tlx.losses.sigmoid_cross_entropy
 # optimizer = tlx.optimizers.Momentum(learning_rate=5e-4, momentum=0.5)
 loss_fn = tlx.losses.mean_squared_error
-optimizer_g = tlx.optimizers.Adam(learning_rate=3e-4, beta_1=0.5, beta_2=0.999)
-optimizer_d = tlx.optimizers.Adam(learning_rate=3e-4)
+optimizer_g = tlx.optimizers.Adam(lr=3e-4, beta_1=0.5, beta_2=0.999)
+optimizer_d = tlx.optimizers.Adam(lr=3e-4)

 g_weights = G.trainable_weights
 d_weights = D.trainable_weights

examples/basic_tutorials/mnist_mlp.py

Lines changed: 2 additions & 2 deletions
@@ -3,10 +3,10 @@

 # The same set of code can switch the backend with one line
 import os
-os.environ['TL_BACKEND'] = 'tensorflow'
+# os.environ['TL_BACKEND'] = 'tensorflow'
 # os.environ['TL_BACKEND'] = 'mindspore'
 # os.environ['TL_BACKEND'] = 'paddle'
-# os.environ['TL_BACKEND'] = 'torch'
+os.environ['TL_BACKEND'] = 'torch'

 import tensorlayerx as tlx
 from tensorlayerx.nn import Module
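
Several tutorials in this commit only change which backend line is left uncommented. The mechanism is the same in all of them: TL_BACKEND must be set before tensorlayerx is imported, and the rest of the script is backend-agnostic. A minimal sketch:

    import os

    # choose one of 'tensorflow', 'mindspore', 'paddle', 'torch';
    # this must run before the tensorlayerx import below
    os.environ['TL_BACKEND'] = 'torch'

    import tensorlayerx as tlx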

examples/basic_tutorials/tutorial_using_tensorboradX.py

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ def forward(self, x, foo=None):
 batch_size = 500
 print_freq = 1
 train_weights = MLP.trainable_weights
-optimizer = tlx.optimizers.Adam(learning_rate=0.0001)
+optimizer = tlx.optimizers.Adam(lr=0.0001)
 train_batch = 0
 test_batch = 0

tensorlayerx/backend/ops/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -11,6 +11,7 @@
 from .load_backend import relu
 from .load_backend import elu
 from .load_backend import relu6
+from .load_backend import prelu
 from .load_backend import leaky_relu
 from .load_backend import sigmoid
 from .load_backend import softmax
@@ -43,6 +44,7 @@
 from .load_backend import ReLU
 from .load_backend import ELU
 from .load_backend import ReLU6
+from .load_backend import PReLU
 from .load_backend import LeakyReLU
 from .load_backend import Softplus
 from .load_backend import Tanh
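
With prelu and PReLU re-exported here, the op becomes reachable through the backend-agnostic ops module. A hypothetical call, assuming the (input, weight, data_format) signature used by the backend implementations added in this commit:

    import numpy as np
    import tensorlayerx as tlx

    x = tlx.convert_to_tensor(np.random.randn(4, 16, 8, 8).astype('float32'))
    w = tlx.convert_to_tensor(np.full((16,), 0.25, dtype='float32'))  # per-channel negative slope
    # data_format handling is assumed to follow the backend's preprocess_2d_format
    y = tlx.ops.prelu(x, w, data_format='channels_first')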

tensorlayerx/backend/ops/mindspore_nn.py

Lines changed: 27 additions & 8 deletions
@@ -271,15 +271,15 @@ def relu6(x):

 class LeakyReLU(Cell):

-    def __init__(self, alpha=0.2):
+    def __init__(self, negative_slope=0.01):
         super(LeakyReLU, self).__init__()
-        self.leakyrelu = ms.nn.LeakyReLU(alpha=alpha)
+        self.leakyrelu = ms.nn.LeakyReLU(alpha=negative_slope)

     def construct(self, x):
         return self.leakyrelu(x)


-def leaky_relu(x, alpha=0.2):
+def leaky_relu(x, negative_slope=0.2):
     """
     Compute the Leaky ReLU activation function.

@@ -294,9 +294,9 @@ def leaky_relu(x, alpha=0.2):
         The activation value.
     """

-    leaky_relu = LeakyReLU(alpha=alpha)
+    leaky_relu = ms.nn.LeakyReLU(alpha=negative_slope)
     output = leaky_relu(x)
-    return leaky_relu
+    return output


 class Softplus(Cell):
@@ -348,15 +348,15 @@ def sigmoid(x):

 class Softmax(Cell):

-    def __init__(self):
+    def __init__(self, axis = -1):
         super(Softmax, self).__init__()
-        self.softmax = P.Softmax()
+        self.softmax = P.Softmax(axis)

     def construct(self, x):
         return self.softmax(x)


-def softmax(logits, axis=None):
+def softmax(logits, axis=-1):
     """
     Computes softmax activations.

@@ -2392,3 +2392,22 @@ def __init__(

     def construct(self, inputs):
         raise NotImplementedError
+
+class PReLU(Cell):
+
+    def __init__(self, data_format):
+        super(PReLU, self).__init__()
+        self.data_format = data_format
+
+    def __call__(self, input, weight):
+
+        prelu = P.PReLU()
+        v = prelu(input, F.cast(weight, input.dtype))
+        return v
+
+
+def prelu(input, weight, data_format):
+
+    prelu = P.PReLU()
+    v = prelu(input, F.cast(weight, input.dtype))
+    return v
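
Besides adding PReLU, this file fixes leaky_relu, which previously returned the LeakyReLU cell instead of the activation values, and gives Softmax/softmax a default axis of -1. A rough sketch of the corrected behaviour on the MindSpore backend (assuming mindspore is installed; names follow the hunks above):

    import numpy as np
    import mindspore as ms
    from tensorlayerx.backend.ops.mindspore_nn import leaky_relu, softmax

    x = ms.Tensor(np.array([[-1.0, 2.0, -3.0]], dtype=np.float32))
    y = leaky_relu(x, negative_slope=0.2)  # now returns the activation tensor, not the cell
    p = softmax(x)                         # axis defaults to the last dimension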

tensorlayerx/backend/ops/paddle_nn.py

Lines changed: 24 additions & 8 deletions
@@ -257,14 +257,14 @@ def relu6(x):

 class LeakyReLU(object):

-    def __init__(self, alpha=0.2):
-        self.alpha = alpha
+    def __init__(self, negative_slope=0.2):
+        self.negative_slope = negative_slope

     def __call__(self, x):
-        return F.leaky_relu(x, negative_slope=self.alpha)
+        return F.leaky_relu(x, negative_slope=self.negative_slope)


-def leaky_relu(x):
+def leaky_relu(x, negative_slope=0.01):
     """
     Compute the Leaky ReLU activation function.

@@ -279,7 +279,7 @@ def leaky_relu(x):
         The activation value.
     """

-    return F.leaky_relu(x)
+    return F.leaky_relu(x, negative_slope)


 class Softplus(object):
@@ -327,11 +327,11 @@ def sigmoid(x):

 class Softmax(object):

-    def __init__(self):
-        pass
+    def __init__(self, axis = -1):
+        self.axis = axis

     def __call__(self, x):
-        return F.softmax(x)
+        return F.softmax(x, axis=self.axis)


 def softmax(logits, axis=-1):
@@ -2011,3 +2011,19 @@ def __init__(

     def __call__(self, inputs):
         raise NotImplementedError
+
+
+class PReLU(object):
+
+    def __init__(self, data_format):
+        self.data_format, _ = preprocess_2d_format(data_format, None)
+
+    def __call__(self, input, weight):
+
+        return F.prelu(input, weight, data_format=self.data_format)
+
+
+def prelu(input, weight, data_format):
+
+    data_format, _ = preprocess_2d_format(data_format, None)
+    return F.prelu(input, weight, data_format=data_format)
