@@ -28,6 +28,7 @@ struct nf_flow_key {
         struct flow_dissector_key_basic basic;
         union {
                 struct flow_dissector_key_ipv4_addrs ipv4;
+                struct flow_dissector_key_ipv6_addrs ipv6;
         };
         struct flow_dissector_key_tcp tcp;
         struct flow_dissector_key_ports tp;
@@ -57,6 +58,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
         NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
         NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
         NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
         NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
         NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
 
@@ -69,9 +71,18 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
                 key->ipv4.dst = tuple->dst_v4.s_addr;
                 mask->ipv4.dst = 0xffffffff;
                 break;
+        case AF_INET6:
+                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+                key->basic.n_proto = htons(ETH_P_IPV6);
+                key->ipv6.src = tuple->src_v6;
+                memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
+                key->ipv6.dst = tuple->dst_v6;
+                memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
+                break;
         default:
                 return -EOPNOTSUPP;
         }
+        match->dissector.used_keys |= BIT(key->control.addr_type);
         mask->basic.n_proto = 0xffff;
 
         switch (tuple->l4proto) {
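The new AF_INET6 case mirrors the IPv4 one: it selects FLOW_DISSECTOR_KEY_IPV6_ADDRS as the address type, sets the EtherType key to ETH_P_IPV6, copies both tuple addresses into the key, and fills the corresponding masks with 0xff so the match is exact. The used_keys update that follows records whichever address key was chosen, which is why a later hunk drops the unconditional FLOW_DISSECTOR_KEY_IPV4_ADDRS bit. The user-space sketch below is not part of the patch (masked_match() and the loopback example are hypothetical); it only illustrates the key/mask idiom: an all-ones mask over a struct in6_addr keeps every address bit significant, i.e. an exact /128 match.

```c
/* Hypothetical user-space sketch of the key/mask matching idiom used above. */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* A packet field matches when (packet & mask) == key. */
static int masked_match(const struct in6_addr *pkt, const struct in6_addr *key,
                        const struct in6_addr *mask)
{
        unsigned char masked[16];
        int i;

        for (i = 0; i < 16; i++)
                masked[i] = pkt->s6_addr[i] & mask->s6_addr[i];
        return memcmp(masked, key->s6_addr, sizeof(masked)) == 0;
}

int main(void)
{
        struct in6_addr addr = IN6ADDR_LOOPBACK_INIT, mask;

        memset(&mask, 0xff, sizeof(mask));      /* same idiom as the memset() calls in the patch */
        printf("exact /128 match: %d\n", masked_match(&addr, &addr, &mask));
        return 0;
}
```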
@@ -96,14 +107,13 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
 
         match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
                                       BIT(FLOW_DISSECTOR_KEY_BASIC) |
-                                      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
                                       BIT(FLOW_DISSECTOR_KEY_PORTS);
         return 0;
 }
 
 static void flow_offload_mangle(struct flow_action_entry *entry,
-                                enum flow_action_mangle_base htype,
-                                u32 offset, u8 *value, u8 *mask)
+                                enum flow_action_mangle_base htype, u32 offset,
+                                const __be32 *value, const __be32 *mask)
 {
         entry->id = FLOW_ACTION_MANGLE;
         entry->mangle.htype = htype;
@@ -140,12 +150,12 @@ static int flow_offload_eth_src(struct net *net,
         memcpy(&val16, dev->dev_addr, 2);
         val = val16 << 16;
         flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
-                            (u8 *)&val, (u8 *)&mask);
+                            &val, &mask);
 
         mask = ~0xffffffff;
         memcpy(&val, dev->dev_addr + 2, 4);
         flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
-                            (u8 *)&val, (u8 *)&mask);
+                            &val, &mask);
         dev_put(dev);
 
         return 0;
@@ -170,13 +180,13 @@ static int flow_offload_eth_dst(struct net *net,
         mask = ~0xffffffff;
         memcpy(&val, n->ha, 4);
         flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
-                            (u8 *)&val, (u8 *)&mask);
+                            &val, &mask);
 
         mask = ~0x0000ffff;
         memcpy(&val16, n->ha + 4, 2);
         val = val16;
         flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
-                            (u8 *)&val, (u8 *)&mask);
+                            &val, &mask);
         neigh_release(n);
 
         return 0;
@@ -206,7 +216,7 @@ static void flow_offload_ipv4_snat(struct net *net,
         }
 
         flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
-                            (u8 *)&addr, (u8 *)&mask);
+                            &addr, &mask);
 }
 
 static void flow_offload_ipv4_dnat(struct net *net,
@@ -233,21 +243,20 @@ static void flow_offload_ipv4_dnat(struct net *net,
         }
 
         flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
-                            (u8 *)&addr, (u8 *)&mask);
+                            &addr, &mask);
 }
 
 static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                      unsigned int offset,
-                                     u8 *addr, u8 *mask)
+                                     const __be32 *addr, const __be32 *mask)
 {
         struct flow_action_entry *entry;
         int i;
 
         for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
                 entry = flow_action_entry_next(flow_rule);
                 flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
-                                    offset + i,
-                                    &addr[i], mask);
+                                    offset + i, &addr[i], mask);
         }
 }
 
@@ -257,23 +266,23 @@ static void flow_offload_ipv6_snat(struct net *net,
                                    struct nf_flow_rule *flow_rule)
 {
         u32 mask = ~htonl(0xffffffff);
-        const u8 *addr;
+        const __be32 *addr;
         u32 offset;
 
         switch (dir) {
         case FLOW_OFFLOAD_DIR_ORIGINAL:
-                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr;
+                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
                 offset = offsetof(struct ipv6hdr, saddr);
                 break;
         case FLOW_OFFLOAD_DIR_REPLY:
-                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr;
+                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
                 offset = offsetof(struct ipv6hdr, daddr);
                 break;
         default:
                 return;
         }
 
-        flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
 }
 
 static void flow_offload_ipv6_dnat(struct net *net,
@@ -282,23 +291,23 @@ static void flow_offload_ipv6_dnat(struct net *net,
                                    struct nf_flow_rule *flow_rule)
 {
         u32 mask = ~htonl(0xffffffff);
-        const u8 *addr;
+        const __be32 *addr;
         u32 offset;
 
         switch (dir) {
         case FLOW_OFFLOAD_DIR_ORIGINAL:
-                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr;
+                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
                 offset = offsetof(struct ipv6hdr, daddr);
                 break;
         case FLOW_OFFLOAD_DIR_REPLY:
-                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr;
+                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
                 offset = offsetof(struct ipv6hdr, saddr);
                 break;
         default:
                 return;
         }
 
-        flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
 }
 
 static int flow_offload_l4proto(const struct flow_offload *flow)
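The SNAT and DNAT helpers above now pass flow_offload_ipv6_mangle() a const __be32 pointer taken from s6_addr32 rather than a u8 pointer taken from s6_addr. Both accessors alias the same 16 bytes of struct in6_addr, so only the pointer type (and hence the 32-bit word granularity the mangle entries carry) changes, not the data. A standalone user-space sketch, not from the kernel tree, assuming glibc's s6_addr32 accessor (exposed with _DEFAULT_SOURCE):

```c
#define _DEFAULT_SOURCE         /* for the s6_addr32 accessor in glibc */
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
        struct in6_addr a;

        inet_pton(AF_INET6, "2001:db8::1", &a);
        /* s6_addr (bytes) and s6_addr32 (big-endian 32-bit words) view the
         * same storage, so switching the pointer type does not move data. */
        printf("same storage: %d\n", (void *)a.s6_addr == (void *)a.s6_addr32);
        printf("first word:   0x%08x\n", ntohl(a.s6_addr32[0]));   /* 0x20010db8 */
        return 0;
}
```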
@@ -326,25 +335,24 @@ static void flow_offload_port_snat(struct net *net,
                                    struct nf_flow_rule *flow_rule)
 {
         struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-        u32 mask = ~htonl(0xffff0000);
-        __be16 port;
+        u32 mask = ~htonl(0xffff0000), port;
         u32 offset;
 
         switch (dir) {
         case FLOW_OFFLOAD_DIR_ORIGINAL:
-                port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                 offset = 0; /* offsetof(struct tcphdr, source); */
                 break;
         case FLOW_OFFLOAD_DIR_REPLY:
-                port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                 offset = 0; /* offsetof(struct tcphdr, dest); */
                 break;
         default:
-                break;
+                return;
         }
-
+        port = htonl(port << 16);
         flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
-                            (u8 *)&port, (u8 *)&mask);
+                            &port, &mask);
 }
 
 static void flow_offload_port_dnat(struct net *net,
@@ -353,25 +361,24 @@ static void flow_offload_port_dnat(struct net *net,
                                    struct nf_flow_rule *flow_rule)
 {
         struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-        u32 mask = ~htonl(0xffff);
-        __be16 port;
+        u32 mask = ~htonl(0xffff), port;
         u32 offset;
 
         switch (dir) {
         case FLOW_OFFLOAD_DIR_ORIGINAL:
-                port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                 offset = 0; /* offsetof(struct tcphdr, source); */
                 break;
         case FLOW_OFFLOAD_DIR_REPLY:
-                port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                 offset = 0; /* offsetof(struct tcphdr, dest); */
                 break;
         default:
-                break;
+                return;
         }
-
+        port = htonl(port);
         flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
-                            (u8 *)&port, (u8 *)&mask);
+                            &port, &mask);
 }
 
 static void flow_offload_ipv4_checksum(struct net *net,
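Both port helpers now read the tuple port into host order with ntohs() and rebuild a 32-bit network-order word with htonl(): shifted left by 16 for SNAT so the value lands in the source-port bytes at offset 0, unshifted for DNAT so it lands in the destination-port bytes. The companion masks (~htonl(0xffff0000) and ~htonl(0xffff)) have their zero bits over exactly those bytes. A quick user-space check of that bit arithmetic (not kernel code; 8080 is an arbitrary example port):

```c
/* Standalone check of the byte-order arithmetic above (not kernel code). */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t example_port = 8080;                   /* arbitrary example */
        uint32_t snat_val  = htonl((uint32_t)example_port << 16);
        uint32_t snat_mask = ~htonl(0xffff0000);
        uint32_t dnat_val  = htonl(example_port);
        uint32_t dnat_mask = ~htonl(0xffff);

        /* The zero bits of each mask line up exactly with the bytes the
         * corresponding value populates. */
        printf("snat: val=0x%08x mask=0x%08x\n", ntohl(snat_val), ntohl(snat_mask));
        printf("dnat: val=0x%08x mask=0x%08x\n", ntohl(dnat_val), ntohl(dnat_mask));
        return 0;
}
```

On either byte order this prints val=0x1f900000 mask=0x0000ffff for the SNAT pair and val=0x00001f90 mask=0xffff0000 for the DNAT pair, i.e. value and mask stay complementary regardless of host endianness.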
@@ -574,7 +581,7 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
         cls_flow.rule = flow_rule->rule;
 
         list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
-                err = block_cb->cb(TC_SETUP_FT, &cls_flow,
+                err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
                                    block_cb->cb_priv);
                 if (err < 0)
                         continue;
@@ -599,7 +606,7 @@ static void flow_offload_tuple_del(struct flow_offload_work *offload,
                              &offload->flow->tuplehash[dir].tuple, &extack);
 
         list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
-                block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+                block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
 
         offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
 }
@@ -656,7 +663,7 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload,
                              &offload->flow->tuplehash[dir].tuple, &extack);
 
         list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
-                block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+                block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
         memcpy(stats, &cls_flow.stats, sizeof(*stats));
 }
 
@@ -822,7 +829,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
         bo.extack = &extack;
         INIT_LIST_HEAD(&bo.cb_list);
 
-        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo);
         if (err < 0)
                 return err;
 