@@ -6,78 +6,51 @@
 from .harness import DispatchTestCase
 
 
-class TestLayerNormConverter(DispatchTestCase):
+class TestNativeLayerNormConverter(DispatchTestCase):
     @parameterized.expand(
         [
-            (
-                (5, 3, 2, 4),
-                [
-                    4,
-                ],
-            ),
-            ((5, 3, 2, 4), [2, 4]),
-            ((5, 3, 2, 4), [3, 2, 4]),
-            ((5, 3, 2, 4), [5, 3, 2, 4]),
+            ((2, 4, 6), [6]),
+            ((2, 4, 6), [4, 6]),
+            ((2, 4, 6), [2, 4, 6]),
         ]
     )
-    def test_layer_norm(self, input_shape, normalized_shape, eps=1e-05):
+    def test_layer_norm_1d(self, input_shape, normalized_shape):
         class LayerNorm(torch.nn.Module):
             def forward(self, x):
-                return torch.ops.aten.layer_norm.default(
-                    x,
-                    normalized_shape,
-                    torch.randn(normalized_shape),
-                    torch.randn(normalized_shape),
-                    eps,
-                )
+                return torch.ops.aten.native_layer_norm.default(
+                    x, normalized_shape, None, None, 1e-05
+                )[0]
 
         inputs = [torch.randn(input_shape)]
-        self.run_test(
-            LayerNorm(),
-            inputs,
-        )
-
+        self.run_test(LayerNorm(), inputs, use_dynamo_tracer=True)
 
-class TestNativeLayerNormConverter(DispatchTestCase):
     @parameterized.expand(
         [
-            (
-                (5, 3, 2, 4),
-                [
-                    4,
-                ],
-            ),
+            ((5, 3, 2, 4), [4]),
             ((5, 3, 2, 4), [2, 4]),
             ((5, 3, 2, 4), [3, 2, 4]),
             ((5, 3, 2, 4), [5, 3, 2, 4]),
         ]
     )
-    def test_layer_norm(self, input_shape, normalized_shape, eps=1e-05):
+    def test_layer_norm_2d(self, input_shape, normalized_shape):
         class LayerNorm(torch.nn.Module):
-            def forward(self, x):
+            def forward(self, x, weight, bias):
                 return torch.ops.aten.native_layer_norm.default(
-                    x,
-                    normalized_shape,
-                    torch.randn(normalized_shape),
-                    torch.randn(normalized_shape),
-                    eps,
+                    x, normalized_shape, weight, bias, 1e-05
                 )[0]
 
-        inputs = [torch.randn(input_shape)]
-        self.run_test(
-            LayerNorm(),
-            inputs,
-        )
+        inputs = [
+            torch.randn(input_shape),
+            torch.randn(normalized_shape),
+            torch.randn(normalized_shape),
+        ]
+        self.run_test(LayerNorm(), inputs, use_dynamo_tracer=True)
 
     def test_layernorm_with_dynamic_shape(self):
         class LayerNorm(torch.nn.Module):
-            def forward(self, x):
+            def forward(self, x, weight, bias):
                 return torch.ops.aten.native_layer_norm.default(
-                    x,
-                    torch.tensor([3, 224, 224]),
-                    torch.ones((3, 224, 224)),
-                    torch.zeros((3, 224, 224)),
-                    1e-05,
+                    x, [3, 224, 224], weight, bias, 1e-05
                 )[0]
 
         input_specs = [
@@ -87,22 +60,19 @@ def forward(self, x):
                 opt_shape=(5, 3, 224, 224),
                 max_shape=(10, 3, 224, 224),
             ),
+            Input(dtype=torch.float32, shape=(3, 224, 224)),
+            Input(dtype=torch.float32, shape=(3, 224, 224)),
         ]
 
         self.run_test_with_dynamic_shape(
-            LayerNorm(),
-            input_specs,
+            LayerNorm(), input_specs, use_dynamo_tracer=True
         )
 
     def test_layernorm_with_dynamic_shape_1(self):
         class LayerNorm(torch.nn.Module):
-            def forward(self, x):
+            def forward(self, x, weight, bias):
                 return torch.ops.aten.native_layer_norm.default(
-                    x,
-                    torch.tensor([3]),
-                    torch.ones((3)),
-                    torch.zeros((3)),
-                    1e-05,
+                    x, [3], weight, bias, 1e-05
                 )[0]
 
         input_specs = [
@@ -112,29 +82,12 @@ def forward(self, x):
                 opt_shape=(3, 3, 3),
                 max_shape=(4, 5, 3),
             ),
+            Input(dtype=torch.float32, shape=(3,)),
+            Input(dtype=torch.float32, shape=(3,)),
         ]
 
         self.run_test_with_dynamic_shape(
-            LayerNorm(),
-            input_specs,
-        )
-
-    @parameterized.expand([((5, 3, 2, 4), [2, 4])])
-    def test_layer_norm_without_Scaling(self, input_shape, normalized_shape, eps=1e-05):
-        class LayerNorm(torch.nn.Module):
-            def forward(self, x):
-                return torch.ops.aten.native_layer_norm.default(
-                    x,
-                    normalized_shape,
-                    None,
-                    None,
-                    eps,
-                )[0]
-
-        inputs = [torch.randn(input_shape)]
-        self.run_test(
-            LayerNorm(),
-            inputs,
+            LayerNorm(), input_specs, use_dynamo_tracer=True
         )
 
 
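Note on the op these tests now target: `aten.native_layer_norm` returns an `(output, mean, rstd)` tuple, which is why every call in the diff indexes `[0]`, and the rewritten tests pass `weight`/`bias` in as graph inputs rather than baking them into the traced module as fresh constants. A minimal eager-mode sketch, with shapes chosen to match the new 1d case (the `allclose` check against the composite `layer_norm` is illustrative only, not part of the test suite):

import torch

x = torch.randn(2, 4, 6)
weight = torch.randn(6)
bias = torch.randn(6)

# native_layer_norm normalizes over the trailing `normalized_shape` dims
# and returns (output, mean, rstd); the converter tests keep only [0].
out, mean, rstd = torch.ops.aten.native_layer_norm.default(
    x, [6], weight, bias, 1e-05
)

# The decomposed op agrees with the composite layer_norm the old tests called.
ref = torch.nn.functional.layer_norm(x, [6], weight, bias, eps=1e-05)
assert torch.allclose(out, ref, atol=1e-6)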