@@ -61,16 +61,20 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 
+static LIST_HEAD(cbs_list);
+static DEFINE_SPINLOCK(cbs_list_lock);
+
 #define BYTES_PER_KBIT (1000LL / 8)
 
 struct cbs_sched_data {
        bool offload;
        int queue;
-       s64 port_rate; /* in bytes/s */
+       atomic64_t port_rate; /* in bytes/s */
        s64 last; /* timestamp in ns */
        s64 credits; /* in bytes */
        s32 locredit; /* in bytes */
@@ -82,6 +86,7 @@ struct cbs_sched_data {
                       struct sk_buff **to_free);
        struct sk_buff *(*dequeue)(struct Qdisc *sch);
        struct Qdisc *qdisc;
+       struct list_head cbs_list;
 };
 
 static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -181,6 +186,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        s64 credits;
        int len;
 
+       if (atomic64_read(&q->port_rate) == -1) {
+               WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
+               return NULL;
+       }
+
        if (q->credits < 0) {
                credits = timediff_to_credits(now - q->last, q->idleslope);
 
@@ -207,7 +217,8 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        /* As sendslope is a negative number, this will decrease the
         * amount of q->credits.
         */
-       credits = credits_from_len(len, q->sendslope, q->port_rate);
+       credits = credits_from_len(len, q->sendslope,
+                                  atomic64_read(&q->port_rate));
        credits += q->credits;
 
        q->credits = max_t(s64, credits, q->locredit);
@@ -294,6 +305,50 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
        return 0;
 }
 
+static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
+{
+       struct ethtool_link_ksettings ecmd;
+       int port_rate = -1;
+
+       if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
+           ecmd.base.speed != SPEED_UNKNOWN)
+               port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
+
+       atomic64_set(&q->port_rate, port_rate);
+       netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
+                  dev->name, (long long)atomic64_read(&q->port_rate),
+                  ecmd.base.speed);
+}
+
+static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event,
+                           void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct cbs_sched_data *q;
+       struct net_device *qdev;
+       bool found = false;
+
+       ASSERT_RTNL();
+
+       if (event != NETDEV_UP && event != NETDEV_CHANGE)
+               return NOTIFY_DONE;
+
+       spin_lock(&cbs_list_lock);
+       list_for_each_entry(q, &cbs_list, cbs_list) {
+               qdev = qdisc_dev(q->qdisc);
+               if (qdev == dev) {
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock(&cbs_list_lock);
+
+       if (found)
+               cbs_set_port_rate(dev, q);
+
+       return NOTIFY_DONE;
+}
+
 static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
                      struct netlink_ext_ack *extack)
 {
@@ -315,16 +370,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
        qopt = nla_data(tb[TCA_CBS_PARMS]);
 
        if (!qopt->offload) {
-               struct ethtool_link_ksettings ecmd;
-               s64 link_speed;
-
-               if (!__ethtool_get_link_ksettings(dev, &ecmd))
-                       link_speed = ecmd.base.speed;
-               else
-                       link_speed = SPEED_1000;
-
-               q->port_rate = link_speed * 1000 * BYTES_PER_KBIT;
-
+               cbs_set_port_rate(dev, q);
                cbs_disable_offload(dev, q);
        } else {
                err = cbs_enable_offload(dev, q, qopt, extack);
@@ -347,6 +393,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
+       int err;
 
        if (!opt) {
                NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
@@ -367,16 +414,29 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
-       return cbs_change(sch, opt, extack);
+       err = cbs_change(sch, opt, extack);
+       if (err)
+               return err;
+
+       if (!q->offload) {
+               spin_lock(&cbs_list_lock);
+               list_add(&q->cbs_list, &cbs_list);
+               spin_unlock(&cbs_list_lock);
+       }
+
+       return 0;
 }
 
 static void cbs_destroy(struct Qdisc *sch)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
 
-       qdisc_watchdog_cancel(&q->watchdog);
+       spin_lock(&cbs_list_lock);
+       list_del(&q->cbs_list);
+       spin_unlock(&cbs_list_lock);
 
+       qdisc_watchdog_cancel(&q->watchdog);
        cbs_disable_offload(dev, q);
 
        if (q->qdisc)
@@ -487,14 +547,24 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
+static struct notifier_block cbs_device_notifier = {
+       .notifier_call = cbs_dev_notifier,
+};
+
 static int __init cbs_module_init(void)
 {
+       int err = register_netdevice_notifier(&cbs_device_notifier);
+
+       if (err)
+               return err;
+
        return register_qdisc(&cbs_qdisc_ops);
 }
 
 static void __exit cbs_module_exit(void)
 {
        unregister_qdisc(&cbs_qdisc_ops);
+       unregister_netdevice_notifier(&cbs_device_notifier);
 }
 module_init(cbs_module_init)
 module_exit(cbs_module_exit)
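
For readers less familiar with the netdevice notifier machinery this patch hooks into, the following is a minimal, hypothetical out-of-tree sketch (not part of the patch; the "demo" names are invented) showing the same register/handle/unregister pattern that cbs_dev_notifier() uses, reduced to a module that only logs link events:

// Hypothetical illustration of the notifier pattern used by sch_cbs above.
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_dev_notifier(struct notifier_block *nb, unsigned long event,
                             void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* Like cbs_dev_notifier(), only care about events that can
         * change the link speed.
         */
        if (event == NETDEV_UP || event == NETDEV_CHANGE)
                netdev_info(dev, "link event %lu\n", event);

        return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
        .notifier_call = demo_dev_notifier,
};

static int __init demo_init(void)
{
        return register_netdevice_notifier(&demo_notifier);
}

static void __exit demo_exit(void)
{
        unregister_netdevice_notifier(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");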