
Commit 7593500 (1 parent: 03a677c)

fix torch opti, update ops

36 files changed: +900 -142 lines

docs/index.rst (+1)
@@ -56,6 +56,7 @@ method, this part of the documentation is for you.
     modules/activation
     modules/losses
+    modules/metrics
     modules/dataflow
     modules/files
     modules/nn

docs/modules/losses.rst (+1 -1)
@@ -2,7 +2,7 @@ API - Losses
 ==================
 
 To make TensorLayerX simple, we minimize the number of cost functions as much as we can.
-For more complex activation, TensorFlow(MindSpore, PaddlePaddle, PyTorch) API will be required.
+For more complex loss functions, the TensorFlow (MindSpore, PaddlePaddle, PyTorch) APIs will be required.
 
 .. note::
     Please refer to `Getting Started <https://github.com/tensorlayer/TensorLayerX/tree/master/docs/user>`_ for getting specific weights for weight regularization.
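
For orientation, a minimal sketch of calling one of the built-in cost functions (the helper name follows the MNIST tutorials in this repo; treat the exact signature as an assumption):

    import tensorlayerx as tlx

    # raw logits of shape (batch, classes) and integer class labels
    logits = tlx.convert_to_tensor([[2.0, 0.5, 0.3], [0.2, 3.0, 0.1]])
    labels = tlx.convert_to_tensor([0, 1])
    # mean cross-entropy over the batch (assumed: tlx.losses.softmax_cross_entropy_with_logits)
    loss = tlx.losses.softmax_cross_entropy_with_logits(logits, labels)
    print(loss)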

docs/modules/metrics.rst (+55)
@@ -0,0 +1,55 @@
+API - Metrics
+==================
+
+The tensorlayerx.metrics module contains Accuracy, Auc, Precision and Recall.
+For more complex metrics, you can encapsulate metric logic and APIs by subclassing the Metric base class.
+
+
+.. automodule:: tensorlayerx.metrics
+
+Metric list
+-------------
+
+.. autosummary::
+
+   Metric
+   Accuracy
+   Auc
+   Precision
+   Recall
+   acc
+
+
+Metric
+^^^^^^^^^^^^^^^^
+.. autoclass:: Metric
+
+
+Accuracy
+""""""""""""""""""""""""""
+.. autoclass:: Accuracy
+   :members:
+
+
+Auc
+""""""""""""""""""""""""""
+.. autoclass:: Auc
+   :members:
+
+
+Precision
+""""""""""""""""""""""""""
+.. autoclass:: Precision
+   :members:
+
+
+Recall
+""""""""""""""""""""""""""
+.. autoclass:: Recall
+   :members:
+
+
+acc
+""""""""""""""""""""""""""
+.. autofunction:: acc
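
To make the new module concrete, a minimal usage sketch (assuming the update/result/reset interface the class names suggest; the shapes here are illustrative):

    import tensorlayerx as tlx

    metric = tlx.metrics.Accuracy()
    y_pred = tlx.convert_to_tensor([[0.1, 0.9], [0.8, 0.2]])  # per-class scores
    y_true = tlx.convert_to_tensor([[1], [0]])                # ground-truth labels
    metric.update(y_pred, y_true)   # accumulate one batch
    print(metric.result())          # running accuracy; 1.0 for this batch
    metric.reset()                  # clear accumulated state between epochs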

docs/modules/ops.rst (+11 -1)
@@ -114,6 +114,8 @@ API - Operations
    is_tensor
    tensor_scatter_nd_update
    diag
+   mask_select
+   eye
 
 TensorLayerX Tensor Operations
 --------------------------------
@@ -548,4 +550,12 @@ tensor_scatter_nd_update
 
 diag
 ^^^^^^^^^^^^^^^^^^^^^^^
-.. autofunction:: diag
+.. autofunction:: diag
+
+mask_select
+^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: mask_select
+
+eye
+^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: eye
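
Both newly documented ops are small tensor utilities. A sketch of their likely behaviour (assuming mask_select mirrors torch.masked_select, eye builds an identity matrix, and both are re-exported at the top level like the other ops on this page):

    import tensorlayerx as tlx

    x = tlx.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
    mask = x > 2.0
    print(tlx.mask_select(x, mask))  # flattens to the selected elements: [3., 4.]
    print(tlx.eye(3))                # 3x3 identity matrix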

docs/user/get_start_advance.rst (+2 -2)
@@ -95,8 +95,8 @@ The full implementation is as follow, which supports both automatic inference in
 With TensorLayerX
 
 >>> net = tlx.nn.Input([100, 50], name='input')
->>> linear = tlx.nn.Linear(out_features=800, act=tlx.ReLU, in_features=50, name='linear_1')
->>> tensor = tlx.nn.Linear(out_features=800, act=tlx.ReLU, name='linear_2')(net)
+>>> linear = tlx.nn.Linear(out_features=800, act=tlx.nn.ReLU, in_features=50, name='linear_1')
+>>> tensor = tlx.nn.Linear(out_features=800, act=tlx.nn.ReLU, name='linear_2')(net)
 
 Notes
 -----
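
This is the first of many occurrences of the same rename in this commit: activation classes are now referenced under tensorlayerx.nn rather than the top-level tlx namespace. The before/after, taken directly from the diff above:

    # old spelling, removed by this commit
    # linear = tlx.nn.Linear(out_features=800, act=tlx.ReLU, in_features=50)

    # new spelling
    linear = tlx.nn.Linear(out_features=800, act=tlx.nn.ReLU, in_features=50)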

docs/user/get_start_model.rst (+10 -10)
@@ -18,9 +18,9 @@ Sequential model
 
     def get_model():
         layer_list = []
-        layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=784, name='Linear1'))
-        layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=800, name='Linear2'))
-        layer_list.append(Linear(out_features=10, act=tlx.ReLU, in_features=800, name='Linear3'))
+        layer_list.append(Linear(out_features=800, act=tlx.nn.ReLU, in_features=784, name='Linear1'))
+        layer_list.append(Linear(out_features=800, act=tlx.nn.ReLU, in_features=800, name='Linear2'))
+        layer_list.append(Linear(out_features=10, act=tlx.nn.ReLU, in_features=800, name='Linear3'))
         MLP = Sequential(layer_list)
         return MLP
@@ -43,9 +43,9 @@ In this case, you need to manually input the output shape of the previous layer
         super(CustomModel, self).__init__()
 
         self.dropout1 = Dropout(p=0.2)
-        self.linear1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
+        self.linear1 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=784)
         self.dropout2 = Dropout(p=0.2)
-        self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
+        self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=800)
         self.dropout3 = Dropout(p=0.2)
         self.linear3 = Linear(out_features=10, act=None, in_features=800)
@@ -83,9 +83,9 @@ In this case, you do not manually input the output shape of the previous layer t
         super(CustomModel, self).__init__()
 
         self.dropout1 = Dropout(p=0.2)
-        self.linear1 = Linear(out_features=800, act=tlx.ReLU)
+        self.linear1 = Linear(out_features=800, act=tlx.nn.ReLU)
         self.dropout2 = Dropout(p=0.2)
-        self.linear2 = Linear(out_features=800, act=tlx.ReLU)
+        self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU)
         self.dropout3 = Dropout(p=0.2)
         self.linear3 = Linear(out_features=10, act=None)
@@ -135,9 +135,9 @@ For dynamic model, call the layer multiple time in forward function
     class MyModel(Module):
         def __init__(self):
             super(MyModel, self).__init__()
-            self.linear_shared = Linear(out_features=800, act=tlx.ReLU, in_features=784)
-            self.linear1 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
-            self.linear2 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
+            self.linear_shared = Linear(out_features=800, act=tlx.nn.ReLU, in_features=784)
+            self.linear1 = Linear(out_features=10, act=tlx.nn.ReLU, in_features=800)
+            self.linear2 = Linear(out_features=10, act=tlx.nn.ReLU, in_features=800)
             self.cat = Concat()
 
         def forward(self, x):
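
A short sketch of driving the Sequential model defined above (tlx.nn.Input is used as in get_start_advance.rst earlier in this commit; the batch size is arbitrary):

    import tensorlayerx as tlx

    MLP = get_model()             # the Sequential built from layer_list
    x = tlx.nn.Input([8, 784])    # a dummy batch of 8 flattened 28x28 images
    y = MLP(x)
    print(y.shape)                # (8, 10) class logits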

examples/basic_tutorials/automatic_inference_input_shape.py (+3 -3)
@@ -20,11 +20,11 @@ def __init__(self):
         super(CustomModel, self).__init__()
         self.dropout1 = Dropout(p=0.2)
         self.linear1 = Linear(out_features=800)
-        self.batchnorm = BatchNorm1d(act=tlx.ReLU)
+        self.batchnorm = BatchNorm1d(act=tlx.nn.ReLU)
         self.dropout2 = Dropout(p=0.2)
-        self.linear2 = Linear(out_features=800, act=tlx.ReLU)
+        self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU)
         self.dropout3 = Dropout(p=0.2)
-        self.linear3 = Linear(out_features=10, act=tlx.ReLU)
+        self.linear3 = Linear(out_features=10, act=tlx.nn.ReLU)
 
     def forward(self, x, foo=None):
         z = self.dropout1(x)
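
Because the layers above omit in_features, the network has to infer shapes from one dummy forward pass before training; a sketch of that build step (init_build and its exact spelling are assumptions based on this tutorial's name):

    MLP = CustomModel()
    MLP.init_build(tlx.nn.Input(shape=(1, 784)))  # trigger automatic shape inference
    MLP.set_train()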

examples/basic_tutorials/cifar10_cnn.py (+17 -17)
@@ -33,17 +33,17 @@ def __init__(self):
         b_init2 = tlx.nn.initializers.constant(value=0.1)
 
         self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3)
-        self.bn = BatchNorm2d(num_features=64, act=tlx.ReLU)
+        self.bn = BatchNorm2d(num_features=64, act=tlx.nn.ReLU)
         self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')
 
         self.conv2 = Conv2d(
-            64, (5, 5), (1, 1), padding='SAME', act=tlx.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64
+            64, (5, 5), (1, 1), padding='SAME', act=tlx.nn.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64
         )
         self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
 
         self.flatten = Flatten(name='flatten')
-        self.linear1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_features=2304)
-        self.linear2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_features=384)
+        self.linear1 = Linear(384, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_features=2304)
+        self.linear2 = Linear(192, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_features=384)
         self.linear3 = Linear(10, act=None, W_init=W_init2, name='output', in_features=192)
 
     def forward(self, x):
@@ -179,17 +179,17 @@ def forward(self, data, label):
         # b_init2 = tlx.nn.initializers.constant(value=0.1)
         #
         # self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3)
-        # self.bn = BatchNorm2d(num_features=64, act=tlx.ReLU)
+        # self.bn = BatchNorm2d(num_features=64, act=tlx.nn.ReLU)
         # self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')
         #
         # self.conv2 = Conv2d(
-        #     64, (5, 5), (1, 1), padding='SAME', act=tlx.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64
+        #     64, (5, 5), (1, 1), padding='SAME', act=tlx.nn.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64
         # )
         # self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
         #
         # self.flatten = Flatten(name='flatten')
-        # self.linear1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_channels=2304)
-        # self.linear2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_channels=384)
+        # self.linear1 = Linear(384, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_channels=2304)
+        # self.linear2 = Linear(192, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_channels=384)
         # self.linear3 = Linear(10, act=None, W_init=W_init2, name='output', in_channels=192)
         #
         # def forward(self, x):
@@ -364,18 +364,18 @@ def forward(self, data, label):
     # def __init__(self):
     #     super(CNN, self).__init__()
     #     self.conv1 = Conv2d(
-    #         64, (5, 5), (2, 2), b_init=None, name='conv1', in_channels=3, act=tlx.ReLU, data_format='channels_first'
+    #         64, (5, 5), (2, 2), b_init=None, name='conv1', in_channels=3, act=tlx.nn.ReLU, data_format='channels_first'
     #     )
-    #     self.bn = BatchNorm2d(num_features=64, act=tlx.ReLU, data_format='channels_first')
+    #     self.bn = BatchNorm2d(num_features=64, act=tlx.nn.ReLU, data_format='channels_first')
     #     self.maxpool1 = MaxPool2d((3, 3), (2, 2), name='pool1', data_format='channels_first')
     #     self.conv2 = Conv2d(
-    #         128, (5, 5), (2, 2), act=tlx.ReLU, b_init=None, name='conv2', in_channels=64, data_format='channels_first'
+    #         128, (5, 5), (2, 2), act=tlx.nn.ReLU, b_init=None, name='conv2', in_channels=64, data_format='channels_first'
     #     )
     #     self.maxpool2 = MaxPool2d((3, 3), (2, 2), name='pool2', data_format='channels_first')
     #
     #     self.flatten = Flatten(name='flatten')
-    #     self.linear1 = Linear(120, act=tlx.ReLU, name='linear1relu', in_channels=512)
-    #     self.linear2 = Linear(84, act=tlx.ReLU, name='linear2relu', in_channels=120)
+    #     self.linear1 = Linear(120, act=tlx.nn.ReLU, name='linear1relu', in_channels=512)
+    #     self.linear2 = Linear(84, act=tlx.nn.ReLU, name='linear2relu', in_channels=120)
     #     self.linear3 = Linear(10, act=None, name='output', in_channels=84)
     #
     # def forward(self, x):
@@ -509,18 +509,18 @@ def forward(self, data, label):
         # b_init2 = tlx.nn.initializers.constant(value=0.1)
         #
         # self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3)
-        # self.bn1 = BatchNorm2d(num_features=64, act=tlx.ReLU)
+        # self.bn1 = BatchNorm2d(num_features=64, act=tlx.nn.ReLU)
         # self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')
         #
         # self.conv2 = Conv2d(
         #     64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv2', in_channels=64
         # )
-        # self.bn2 = BatchNorm2d(num_features=64, act=tlx.ReLU)
+        # self.bn2 = BatchNorm2d(num_features=64, act=tlx.nn.ReLU)
         # self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
         #
         # self.flatten = Flatten(name='flatten')
-        # self.linear1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_channels=2304)
-        # self.linear2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_channels=384)
+        # self.linear1 = Linear(384, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_channels=2304)
+        # self.linear2 = Linear(192, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_channels=384)
         # self.linear3 = Linear(10, act=None, W_init=W_init2, name='output', in_channels=192)
         #
         # def forward(self, x):

examples/basic_tutorials/gradient_clip_mixed_tensorflow.py (+2 -2)
@@ -22,8 +22,8 @@ class CustomModel(Module):
     def __init__(self):
         super(CustomModel, self).__init__()
         self.linear1 = Linear(out_features=800, in_features=784)
-        self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
-        self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
+        self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=800)
+        self.linear3 = Linear(out_features=10, act=tlx.nn.ReLU, in_features=800)
 
     def forward(self, x, foo=None):
         z = self.linear1(x)
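
For context on the file's purpose, a generic sketch of gradient clipping under the TensorFlow backend (plain tf.GradientTape usage; net, loss_fn, train_weights and optimizer are assumed names, not the file's exact loop):

    import tensorflow as tf

    with tf.GradientTape() as tape:
        logits = net(x_batch)
        loss = loss_fn(logits, y_batch)
    grads = tape.gradient(loss, train_weights)
    grads, _ = tf.clip_by_global_norm(grads, clip_norm=5.0)  # cap the global gradient norm
    optimizer.apply_gradients(zip(grads, train_weights))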

examples/basic_tutorials/imdb_LSTM_simple.py (+1 -1)
@@ -43,7 +43,7 @@ def __init__(self):
         super(ImdbNet, self).__init__()
         self.embedding = Embedding(num_embeddings=vocab_size, embedding_dim=64)
         self.lstm = LSTM(input_size=64, hidden_size=64)
-        self.linear1 = Linear(in_features=64, out_features=64, act=tlx.ReLU)
+        self.linear1 = Linear(in_features=64, out_features=64, act=tlx.nn.ReLU)
         self.linear2 = Linear(in_features=64, out_features=2)
 
     def forward(self, x):

examples/basic_tutorials/mnist_Sequential.py (+3 -3)
@@ -11,9 +11,9 @@
 from tensorlayerx.dataflow import Dataset
 
 layer_list = []
-layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=784, name='linear1'))
-layer_list.append(Linear(out_features=800, act=tlx.ReLU, in_features=800, name='linear2'))
-layer_list.append(Linear(out_features=10, act=tlx.ReLU, in_features=800, name='linear3'))
+layer_list.append(Linear(out_features=800, act=tlx.nn.ReLU, in_features=784, name='linear1'))
+layer_list.append(Linear(out_features=800, act=tlx.nn.ReLU, in_features=800, name='linear2'))
+layer_list.append(Linear(out_features=10, act=tlx.nn.ReLU, in_features=800, name='linear3'))
 MLP = Sequential(layer_list)
 
 X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1, 784))

examples/basic_tutorials/mnist_dataflow.py (+2 -2)
@@ -61,8 +61,8 @@ class MLP(Module):
 
     def __init__(self):
         super(MLP, self).__init__()
-        self.linear1 = Linear(out_features=120, in_features=784, act=tlx.ReLU)
-        self.linear2 = Linear(out_features=84, in_features=120, act=tlx.ReLU)
+        self.linear1 = Linear(out_features=120, in_features=784, act=tlx.nn.ReLU)
+        self.linear2 = Linear(out_features=84, in_features=120, act=tlx.nn.ReLU)
         self.linear3 = Linear(out_features=10, in_features=84)
         self.flatten = Flatten()

examples/basic_tutorials/mnist_gan.py (+3 -3)
@@ -41,9 +41,9 @@ class generator(Module):
 
     def __init__(self):
         super(generator, self).__init__()
-        self.g_fc1 = Linear(out_features=256, in_features=100, act=tlx.ReLU)
-        self.g_fc2 = Linear(out_features=256, in_features=256, act=tlx.ReLU)
-        self.g_fc3 = Linear(out_features=784, in_features=256, act=tlx.Tanh)
+        self.g_fc1 = Linear(out_features=256, in_features=100, act=tlx.nn.ReLU)
+        self.g_fc2 = Linear(out_features=256, in_features=256, act=tlx.nn.ReLU)
+        self.g_fc3 = Linear(out_features=784, in_features=256, act=tlx.nn.Tanh)
 
     def forward(self, x):
         out = self.g_fc1(x)
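
A sketch of sampling from the generator above (the 100-dim noise input and the (-1, 1) output range follow from the layer definitions; set_eval and the rescaling step are assumptions):

    import numpy as np
    import tensorlayerx as tlx

    G = generator()
    G.set_eval()  # disable training-only behaviour
    z = tlx.convert_to_tensor(np.random.normal(size=(16, 100)).astype('float32'))
    fake = G(z)                   # (16, 784), squashed by the final Tanh
    images = (fake + 1.0) / 2.0   # rescale to (0, 1) for display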

examples/basic_tutorials/mnist_mlp.py (+11 -11)
@@ -36,11 +36,11 @@ class CustomModel(Module):
     def __init__(self):
         super(CustomModel, self).__init__()
         self.dropout1 = Dropout(p=0.2)
-        self.linear1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
+        self.linear1 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=784)
         self.dropout2 = Dropout(p=0.2)
-        self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
+        self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=800)
         self.dropout3 = Dropout(p=0.2)
-        self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
+        self.linear3 = Linear(out_features=10, act=tlx.nn.ReLU, in_features=800)
 
     def forward(self, x, foo=None):
         z = self.dropout1(x)
@@ -92,11 +92,11 @@ def forward(self, x, foo=None):
     # super(CustomModel, self).__init__()
     # self.dropout1 = Dropout(p=0.2)
     # self.linear1 = Linear(out_features=800, in_features=784)
-    # self.batchnorm = BatchNorm1d(act=tlx.ReLU, num_features=800)
+    # self.batchnorm = BatchNorm1d(act=tlx.nn.ReLU, num_features=800)
     # self.dropout2 = Dropout(p=0.2)
-    # self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
+    # self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=800)
     # self.dropout3 = Dropout(p=0.2)
-    # self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
+    # self.linear3 = Linear(out_features=10, act=tlx.nn.ReLU, in_features=800)
     #
     # def forward(self, x, foo=None):
     #     z = self.dropout1(x)
@@ -187,9 +187,9 @@ def forward(self, x, foo=None):
     #
     # def __init__(self):
     #     super(MLP, self).__init__()
-    #     self.linear1 = Linear(out_features=800, act=tlx.ReLU, in_features=784)
-    #     self.linear2 = Linear(out_features=800, act=tlx.ReLU, in_features=800)
-    #     self.linear3 = Linear(out_features=10, act=tlx.ReLU, in_features=800)
+    #     self.linear1 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=784)
+    #     self.linear2 = Linear(out_features=800, act=tlx.nn.ReLU, in_features=800)
+    #     self.linear3 = Linear(out_features=10, act=tlx.nn.ReLU, in_features=800)
     #
     # def forward(self, x):
     #     z = self.linear1(x)
@@ -280,8 +280,8 @@ def forward(self, x, foo=None):
     #
     # def __init__(self):
     #     super(MLP, self).__init__()
-    #     self.linear1 = Linear(out_features=120, in_features=784, act=tlx.ReLU)
-    #     self.linear2 = Linear(out_features=84, in_features=120, act=tlx.ReLU)
+    #     self.linear1 = Linear(out_features=120, in_features=784, act=tlx.nn.ReLU)
+    #     self.linear2 = Linear(out_features=84, in_features=120, act=tlx.nn.ReLU)
     #     self.linear3 = Linear(out_features=10, in_features=84)
     #     self.flatten = Flatten()
     #

examples/basic_tutorials/nested_usage_of_layer.py (+4 -4)
@@ -47,19 +47,19 @@ def __init__(self):
         b_init2 = tlx.nn.initializers.constant(value=0.1)
 
         self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3)
-        self.bn = BatchNorm2d(num_features=64, act=tlx.ReLU)
+        self.bn = BatchNorm2d(num_features=64, act=tlx.nn.ReLU)
         self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')
 
         self.conv2 = Conv2d(
-            64, (5, 5), (1, 1), padding='SAME', act=tlx.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64
+            64, (5, 5), (1, 1), padding='SAME', act=tlx.nn.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64
         )
         self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')
 
         self.flatten = Flatten(name='flatten')
-        self.linear1 = Linear(384, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_features=2304)
+        self.linear1 = Linear(384, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear1relu', in_features=2304)
         self.linear_add = self.make_layer(in_channel=384)
 
-        self.linear2 = Linear(192, act=tlx.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_features=384)
+        self.linear2 = Linear(192, act=tlx.nn.ReLU, W_init=W_init2, b_init=b_init2, name='linear2relu', in_features=384)
         self.linear3 = Linear(10, act=None, W_init=W_init2, name='output', in_features=192)
 
     def forward(self, x):
