1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/sch_generic.c Generic packet scheduler routines.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 * Jamal Hadi Salim, <hadi@cyberus.ca> 990601
7 * - Ingress support
8 */
9
10 #include <linux/bitops.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/netdevice.h>
18 #include <linux/skbuff.h>
19 #include <linux/rtnetlink.h>
20 #include <linux/init.h>
21 #include <linux/rcupdate.h>
22 #include <linux/list.h>
23 #include <linux/slab.h>
24 #include <linux/if_vlan.h>
25 #include <linux/skb_array.h>
26 #include <linux/if_macvlan.h>
27 #include <net/sch_generic.h>
28 #include <net/pkt_sched.h>
29 #include <net/dst.h>
30 #include <trace/events/qdisc.h>
31 #include <trace/events/net.h>
32 #include <net/xfrm.h>
33
34 /* Qdisc to use by default */
35 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
36 EXPORT_SYMBOL(default_qdisc_ops);
37
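/* Helper for the paths below that find the underlying txq frozen or
 * stopped: clear STATE_MISSED, then re-check the txq. If it became
 * ready again in the meantime, re-set STATE_MISSED so the skb is
 * retried; otherwise mark the qdisc as DRAINING.
 */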
38 static void qdisc_maybe_clear_missed(struct Qdisc *q,
39 const struct netdev_queue *txq)
40 {
41 clear_bit(__QDISC_STATE_MISSED, &q->state);
42
43 /* Make sure the below netif_xmit_frozen_or_stopped()
44 * checking happens after clearing STATE_MISSED.
45 */
46 smp_mb__after_atomic();
47
48 /* Check netif_xmit_frozen_or_stopped() again to make
49 * sure STATE_MISSED is re-set in case the STATE_MISSED
50 * set by netif_tx_wake_queue()'s rescheduling of
51 * net_tx_action() was cleared by the above clear_bit().
52 */
53 if (!netif_xmit_frozen_or_stopped(txq))
54 set_bit(__QDISC_STATE_MISSED, &q->state);
55 else
56 set_bit(__QDISC_STATE_DRAINING, &q->state);
57 }
58
59 /* Main transmission queue. */
60
61 /* Modifications to data participating in scheduling must be protected with
62 * qdisc_lock(qdisc) spinlock.
63 *
64 * The idea is the following:
65 * - enqueue, dequeue are serialized via qdisc root lock
66 * - ingress filtering is also serialized via qdisc root lock
67 * - updates to tree and tree walking are only done under the rtnl mutex.
68 */
69
70 #define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
71
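/* Pull the head skb off skb_bad_txq if its txq is no longer stopped.
 * Returns NULL if the queue is empty, the skb on success, or
 * SKB_XOFF_MAGIC if the txq is still frozen/stopped.
 */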
72 static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
73 {
74 const struct netdev_queue *txq = q->dev_queue;
75 spinlock_t *lock = NULL;
76 struct sk_buff *skb;
77
78 if (q->flags & TCQ_F_NOLOCK) {
79 lock = qdisc_lock(q);
80 spin_lock(lock);
81 }
82
83 skb = skb_peek(&q->skb_bad_txq);
84 if (skb) {
85 /* check the reason of requeuing without tx lock first */
86 txq = skb_get_tx_queue(txq->dev, skb);
87 if (!netif_xmit_frozen_or_stopped(txq)) {
88 skb = __skb_dequeue(&q->skb_bad_txq);
89 if (qdisc_is_percpu_stats(q)) {
90 qdisc_qstats_cpu_backlog_dec(q, skb);
91 qdisc_qstats_cpu_qlen_dec(q);
92 } else {
93 qdisc_qstats_backlog_dec(q, skb);
94 q->q.qlen--;
95 }
96 } else {
97 skb = SKB_XOFF_MAGIC;
98 qdisc_maybe_clear_missed(q, txq);
99 }
100 }
101
102 if (lock)
103 spin_unlock(lock);
104
105 return skb;
106 }
107
108 static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
109 {
110 struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
111
112 if (unlikely(skb))
113 skb = __skb_dequeue_bad_txq(q);
114
115 return skb;
116 }
117
118 static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
119 struct sk_buff *skb)
120 {
121 spinlock_t *lock = NULL;
122
123 if (q->flags & TCQ_F_NOLOCK) {
124 lock = qdisc_lock(q);
125 spin_lock(lock);
126 }
127
128 __skb_queue_tail(&q->skb_bad_txq, skb);
129
130 if (qdisc_is_percpu_stats(q)) {
131 qdisc_qstats_cpu_backlog_inc(q, skb);
132 qdisc_qstats_cpu_qlen_inc(q);
133 } else {
134 qdisc_qstats_backlog_inc(q, skb);
135 q->q.qlen++;
136 }
137
138 if (lock)
139 spin_unlock(lock);
140 }
141
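/* Put an skb (possibly a list) back onto gso_skb after a failed or
 * blocked transmit. It stays accounted as part of the queue, and a new
 * run is triggered via STATE_MISSED (NOLOCK) or __netif_schedule().
 */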
142 static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
143 {
144 spinlock_t *lock = NULL;
145
146 if (q->flags & TCQ_F_NOLOCK) {
147 lock = qdisc_lock(q);
148 spin_lock(lock);
149 }
150
151 while (skb) {
152 struct sk_buff *next = skb->next;
153
154 __skb_queue_tail(&q->gso_skb, skb);
155
156 /* it's still part of the queue */
157 if (qdisc_is_percpu_stats(q)) {
158 qdisc_qstats_cpu_requeues_inc(q);
159 qdisc_qstats_cpu_backlog_inc(q, skb);
160 qdisc_qstats_cpu_qlen_inc(q);
161 } else {
162 q->qstats.requeues++;
163 qdisc_qstats_backlog_inc(q, skb);
164 q->q.qlen++;
165 }
166
167 skb = next;
168 }
169
170 if (lock) {
171 spin_unlock(lock);
172 set_bit(__QDISC_STATE_MISSED, &q->state);
173 } else {
174 __netif_schedule(q);
175 }
176 }
177
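/* Chain further skbs onto 'skb' while the txq's BQL byte budget
 * allows, so a single HARD_TX_LOCK section can push several packets.
 */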
178 static void try_bulk_dequeue_skb(struct Qdisc *q,
179 struct sk_buff *skb,
180 const struct netdev_queue *txq,
181 int *packets)
182 {
183 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
184
185 while (bytelimit > 0) {
186 struct sk_buff *nskb = q->dequeue(q);
187
188 if (!nskb)
189 break;
190
191 bytelimit -= nskb->len; /* covers GSO len */
192 skb->next = nskb;
193 skb = nskb;
194 (*packets)++; /* GSO counts as one pkt */
195 }
196 skb_mark_not_on_list(skb);
197 }
198
199 /* This variant of try_bulk_dequeue_skb() makes sure
200 * all skbs in the chain are for the same txq
201 */
202 static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
203 struct sk_buff *skb,
204 int *packets)
205 {
206 int mapping = skb_get_queue_mapping(skb);
207 struct sk_buff *nskb;
208 int cnt = 0;
209
210 do {
211 nskb = q->dequeue(q);
212 if (!nskb)
213 break;
214 if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
215 qdisc_enqueue_skb_bad_txq(q, nskb);
216 break;
217 }
218 skb->next = nskb;
219 skb = nskb;
220 } while (++cnt < 8);
221 (*packets) += cnt;
222 skb_mark_not_on_list(skb);
223 }
224
225 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
226 * A requeued skb (via q->gso_skb) can also be a SKB list.
227 */
228 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
229 int *packets)
230 {
231 const struct netdev_queue *txq = q->dev_queue;
232 struct sk_buff *skb = NULL;
233
234 *packets = 1;
235 if (unlikely(!skb_queue_empty(&q->gso_skb))) {
236 spinlock_t *lock = NULL;
237
238 if (q->flags & TCQ_F_NOLOCK) {
239 lock = qdisc_lock(q);
240 spin_lock(lock);
241 }
242
243 skb = skb_peek(&q->gso_skb);
244
245 /* skb may be null if another cpu pulls gso_skb off in between
246 * empty check and lock.
247 */
248 if (!skb) {
249 if (lock)
250 spin_unlock(lock);
251 goto validate;
252 }
253
254 /* skbs in gso_skb were already validated */
255 *validate = false;
256 if (xfrm_offload(skb))
257 *validate = true;
258 /* check the reason of requeuing without tx lock first */
259 txq = skb_get_tx_queue(txq->dev, skb);
260 if (!netif_xmit_frozen_or_stopped(txq)) {
261 skb = __skb_dequeue(&q->gso_skb);
262 if (qdisc_is_percpu_stats(q)) {
263 qdisc_qstats_cpu_backlog_dec(q, skb);
264 qdisc_qstats_cpu_qlen_dec(q);
265 } else {
266 qdisc_qstats_backlog_dec(q, skb);
267 q->q.qlen--;
268 }
269 } else {
270 skb = NULL;
271 qdisc_maybe_clear_missed(q, txq);
272 }
273 if (lock)
274 spin_unlock(lock);
275 goto trace;
276 }
277 validate:
278 *validate = true;
279
280 if ((q->flags & TCQ_F_ONETXQUEUE) &&
281 netif_xmit_frozen_or_stopped(txq)) {
282 qdisc_maybe_clear_missed(q, txq);
283 return skb;
284 }
285
286 skb = qdisc_dequeue_skb_bad_txq(q);
287 if (unlikely(skb)) {
288 if (skb == SKB_XOFF_MAGIC)
289 return NULL;
290 goto bulk;
291 }
292 skb = q->dequeue(q);
293 if (skb) {
294 bulk:
295 if (qdisc_may_bulk(q))
296 try_bulk_dequeue_skb(q, skb, txq, packets);
297 else
298 try_bulk_dequeue_skb_slow(q, skb, packets);
299 }
300 trace:
301 trace_qdisc_dequeue(q, txq, *packets, skb);
302 return skb;
303 }
304
305 /*
306 * Transmit possibly several skbs, and handle the return status as
307 * required. Owning qdisc running bit guarantees that only one CPU
308 * can execute this function.
309 *
310 * Returns to the caller:
311 * false - hardware queue frozen; caller should back off
312 * true - feel free to send more packets
313 */
314 bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
315 struct net_device *dev, struct netdev_queue *txq,
316 spinlock_t *root_lock, bool validate)
317 {
318 int ret = NETDEV_TX_BUSY;
319 bool again = false;
320
321 /* And release qdisc */
322 if (root_lock)
323 spin_unlock(root_lock);
324
325 /* Note that we validate skb (GSO, checksum, ...) outside of locks */
326 if (validate)
327 skb = validate_xmit_skb_list(skb, dev, &again);
328
329 #ifdef CONFIG_XFRM_OFFLOAD
330 if (unlikely(again)) {
331 if (root_lock)
332 spin_lock(root_lock);
333
334 dev_requeue_skb(skb, q);
335 return false;
336 }
337 #endif
338
339 if (likely(skb)) {
340 HARD_TX_LOCK(dev, txq, smp_processor_id());
341 if (!netif_xmit_frozen_or_stopped(txq))
342 skb = dev_hard_start_xmit(skb, dev, txq, &ret);
343 else
344 qdisc_maybe_clear_missed(q, txq);
345
346 HARD_TX_UNLOCK(dev, txq);
347 } else {
348 if (root_lock)
349 spin_lock(root_lock);
350 return true;
351 }
352
353 if (root_lock)
354 spin_lock(root_lock);
355
356 if (!dev_xmit_complete(ret)) {
357 /* Driver returned NETDEV_TX_BUSY - requeue skb */
358 if (unlikely(ret != NETDEV_TX_BUSY))
359 net_warn_ratelimited("BUG %s code %d qlen %d\n",
360 dev->name, ret, q->q.qlen);
361
362 dev_requeue_skb(skb, q);
363 return false;
364 }
365
366 return true;
367 }
368
369 /*
370 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
371 *
372 * The qdisc running state guarantees that only one CPU can process
373 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
374 * this queue.
375 *
376 * netif_tx_lock serializes accesses to device driver.
377 *
378 * qdisc_lock(q) and netif_tx_lock are mutually exclusive;
379 * if one is grabbed, the other must be free.
380 *
381 * Note that this procedure can be called by a watchdog timer.
382 *
383 * Returns to the caller:
384 * false - queue is empty or throttled.
385 * true - queue is not empty.
386 *
387 */
388 static inline bool qdisc_restart(struct Qdisc *q, int *packets)
389 {
390 spinlock_t *root_lock = NULL;
391 struct netdev_queue *txq;
392 struct net_device *dev;
393 struct sk_buff *skb;
394 bool validate;
395
396 /* Dequeue packet */
397 skb = dequeue_skb(q, &validate, packets);
398 if (unlikely(!skb))
399 return false;
400
401 if (!(q->flags & TCQ_F_NOLOCK))
402 root_lock = qdisc_lock(q);
403
404 dev = qdisc_dev(q);
405 txq = skb_get_tx_queue(dev, skb);
406
407 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
408 }
409
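/* Dequeue and transmit until the qdisc is empty, the driver pushes
 * back, or the dev_tx_weight quota is consumed; in the latter case
 * defer further work via STATE_MISSED (NOLOCK) or __netif_schedule().
 */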
410 void __qdisc_run(struct Qdisc *q)
411 {
412 int quota = READ_ONCE(dev_tx_weight);
413 int packets;
414
415 while (qdisc_restart(q, &packets)) {
416 quota -= packets;
417 if (quota <= 0) {
418 if (q->flags & TCQ_F_NOLOCK)
419 set_bit(__QDISC_STATE_MISSED, &q->state);
420 else
421 __netif_schedule(q);
422
423 break;
424 }
425 }
426 }
427
428 unsigned long dev_trans_start(struct net_device *dev)
429 {
430 unsigned long val, res;
431 unsigned int i;
432
433 if (is_vlan_dev(dev))
434 dev = vlan_dev_real_dev(dev);
435 else if (netif_is_macvlan(dev))
436 dev = macvlan_dev_real_dev(dev);
437 res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
438 for (i = 1; i < dev->num_tx_queues; i++) {
439 val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
440 if (val && time_after(val, res))
441 res = val;
442 }
443
444 return res;
445 }
446 EXPORT_SYMBOL(dev_trans_start);
447
448 static void netif_freeze_queues(struct net_device *dev)
449 {
450 unsigned int i;
451 int cpu;
452
453 cpu = smp_processor_id();
454 for (i = 0; i < dev->num_tx_queues; i++) {
455 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
456
457 /* We are the only thread of execution doing a
458 * freeze, but we have to grab the _xmit_lock in
459 * order to synchronize with threads which are in
460 * the ->hard_start_xmit() handler and already
461 * checked the frozen bit.
462 */
463 __netif_tx_lock(txq, cpu);
464 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
465 __netif_tx_unlock(txq);
466 }
467 }
468
469 void netif_tx_lock(struct net_device *dev)
470 {
471 spin_lock(&dev->tx_global_lock);
472 netif_freeze_queues(dev);
473 }
474 EXPORT_SYMBOL(netif_tx_lock);
475
476 static void netif_unfreeze_queues(struct net_device *dev)
477 {
478 unsigned int i;
479
480 for (i = 0; i < dev->num_tx_queues; i++) {
481 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
482
483 /* No need to grab the _xmit_lock here. If the
484 * queue is not stopped for another reason, we
485 * force a schedule.
486 */
487 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
488 netif_schedule_queue(txq);
489 }
490 }
491
492 void netif_tx_unlock(struct net_device *dev)
493 {
494 netif_unfreeze_queues(dev);
495 spin_unlock(&dev->tx_global_lock);
496 }
497 EXPORT_SYMBOL(netif_tx_unlock);
498
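/* Per-device watchdog timer: if some stopped txq has not restarted
 * transmission within dev->watchdog_timeo, report a transmit timeout:
 * freeze the queues, call the driver's ndo_tx_timeout(), unfreeze,
 * and re-arm the timer.
 */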
499 static void dev_watchdog(struct timer_list *t)
500 {
501 struct net_device *dev = from_timer(dev, t, watchdog_timer);
502 bool release = true;
503
504 spin_lock(&dev->tx_global_lock);
505 if (!qdisc_tx_is_noop(dev)) {
506 if (netif_device_present(dev) &&
507 netif_running(dev) &&
508 netif_carrier_ok(dev)) {
509 int some_queue_timedout = 0;
510 unsigned int i;
511 unsigned long trans_start;
512
513 for (i = 0; i < dev->num_tx_queues; i++) {
514 struct netdev_queue *txq;
515
516 txq = netdev_get_tx_queue(dev, i);
517 trans_start = READ_ONCE(txq->trans_start);
518 if (netif_xmit_stopped(txq) &&
519 time_after(jiffies, (trans_start +
520 dev->watchdog_timeo))) {
521 some_queue_timedout = 1;
522 atomic_long_inc(&txq->trans_timeout);
523 break;
524 }
525 }
526
527 if (unlikely(some_queue_timedout)) {
528 trace_net_dev_xmit_timeout(dev, i);
529 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
530 dev->name, netdev_drivername(dev), i);
531 netif_freeze_queues(dev);
532 dev->netdev_ops->ndo_tx_timeout(dev, i);
533 netif_unfreeze_queues(dev);
534 }
535 if (!mod_timer(&dev->watchdog_timer,
536 round_jiffies(jiffies +
537 dev->watchdog_timeo)))
538 release = false;
539 }
540 }
541 spin_unlock(&dev->tx_global_lock);
542
543 if (release)
544 dev_put_track(dev, &dev->watchdog_dev_tracker);
545 }
546
547 void __netdev_watchdog_up(struct net_device *dev)
548 {
549 if (dev->netdev_ops->ndo_tx_timeout) {
550 if (dev->watchdog_timeo <= 0)
551 dev->watchdog_timeo = 5*HZ;
552 if (!mod_timer(&dev->watchdog_timer,
553 round_jiffies(jiffies + dev->watchdog_timeo)))
554 dev_hold_track(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC);
555 }
556 }
557 EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
558
559 static void dev_watchdog_up(struct net_device *dev)
560 {
561 __netdev_watchdog_up(dev);
562 }
563
564 static void dev_watchdog_down(struct net_device *dev)
565 {
566 netif_tx_lock_bh(dev);
567 if (del_timer(&dev->watchdog_timer))
568 dev_put_track(dev, &dev->watchdog_dev_tracker);
569 netif_tx_unlock_bh(dev);
570 }
571
572 /**
573 * netif_carrier_on - set carrier
574 * @dev: network device
575 *
576 * Device has detected acquisition of carrier.
577 */
578 void netif_carrier_on(struct net_device *dev)
579 {
580 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
581 if (dev->reg_state == NETREG_UNINITIALIZED)
582 return;
583 atomic_inc(&dev->carrier_up_count);
584 linkwatch_fire_event(dev);
585 if (netif_running(dev))
586 __netdev_watchdog_up(dev);
587 }
588 }
589 EXPORT_SYMBOL(netif_carrier_on);
590
591 /**
592 * netif_carrier_off - clear carrier
593 * @dev: network device
594 *
595 * Device has detected loss of carrier.
596 */
597 void netif_carrier_off(struct net_device *dev)
598 {
599 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
600 if (dev->reg_state == NETREG_UNINITIALIZED)
601 return;
602 atomic_inc(&dev->carrier_down_count);
603 linkwatch_fire_event(dev);
604 }
605 }
606 EXPORT_SYMBOL(netif_carrier_off);
607
608 /**
609 * netif_carrier_event - report carrier state event
610 * @dev: network device
611 *
612 * Device has detected a carrier event but the carrier state wasn't changed.
613 * Use in drivers when querying carrier state asynchronously, to avoid missing
614 * events (link flaps) if link recovers before it's queried.
615 */
616 void netif_carrier_event(struct net_device *dev)
617 {
618 if (dev->reg_state == NETREG_UNINITIALIZED)
619 return;
620 atomic_inc(&dev->carrier_up_count);
621 atomic_inc(&dev->carrier_down_count);
622 linkwatch_fire_event(dev);
623 }
624 EXPORT_SYMBOL_GPL(netif_carrier_event);
625
626 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
627 under all circumstances. It is difficult to invent anything faster or
628 cheaper.
629 */
630
631 static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
632 struct sk_buff **to_free)
633 {
634 __qdisc_drop(skb, to_free);
635 return NET_XMIT_CN;
636 }
637
638 static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
639 {
640 return NULL;
641 }
642
643 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
644 .id = "noop",
645 .priv_size = 0,
646 .enqueue = noop_enqueue,
647 .dequeue = noop_dequeue,
648 .peek = noop_dequeue,
649 .owner = THIS_MODULE,
650 };
651
652 static struct netdev_queue noop_netdev_queue = {
653 RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
654 .qdisc_sleeping = &noop_qdisc,
655 };
656
657 struct Qdisc noop_qdisc = {
658 .enqueue = noop_enqueue,
659 .dequeue = noop_dequeue,
660 .flags = TCQ_F_BUILTIN,
661 .ops = &noop_qdisc_ops,
662 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
663 .dev_queue = &noop_netdev_queue,
664 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
665 .gso_skb = {
666 .next = (struct sk_buff *)&noop_qdisc.gso_skb,
667 .prev = (struct sk_buff *)&noop_qdisc.gso_skb,
668 .qlen = 0,
669 .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
670 },
671 .skb_bad_txq = {
672 .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
673 .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
674 .qlen = 0,
675 .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
676 },
677 };
678 EXPORT_SYMBOL(noop_qdisc);
679
680 static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
681 struct netlink_ext_ack *extack)
682 {
683 /* register_qdisc() assigns a default of noop_enqueue if unset,
684 * but __dev_queue_xmit() treats noqueue only as such
685 * if this is NULL - so clear it here. */
686 qdisc->enqueue = NULL;
687 return 0;
688 }
689
690 struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
691 .id = "noqueue",
692 .priv_size = 0,
693 .init = noqueue_init,
694 .enqueue = noop_enqueue,
695 .dequeue = noop_dequeue,
696 .peek = noop_dequeue,
697 .owner = THIS_MODULE,
698 };
699
700 static const u8 prio2band[TC_PRIO_MAX + 1] = {
701 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
702 };
703
704 /* 3-band FIFO queue: old style, but should be a bit faster than
705 generic prio+fifo combination.
706 */
707
708 #define PFIFO_FAST_BANDS 3
709
710 /*
711 * Private data for a pfifo_fast scheduler containing:
712 * - rings for priority bands
713 */
714 struct pfifo_fast_priv {
715 struct skb_array q[PFIFO_FAST_BANDS];
716 };
717
718 static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
719 int band)
720 {
721 return &priv->q[band];
722 }
723
724 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
725 struct sk_buff **to_free)
726 {
727 int band = prio2band[skb->priority & TC_PRIO_MAX];
728 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
729 struct skb_array *q = band2list(priv, band);
730 unsigned int pkt_len = qdisc_pkt_len(skb);
731 int err;
732
733 err = skb_array_produce(q, skb);
734
735 if (unlikely(err)) {
736 if (qdisc_is_percpu_stats(qdisc))
737 return qdisc_drop_cpu(skb, qdisc, to_free);
738 else
739 return qdisc_drop(skb, qdisc, to_free);
740 }
741
742 qdisc_update_stats_at_enqueue(qdisc, pkt_len);
743 return NET_XMIT_SUCCESS;
744 }
745
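/* Lockless dequeue: scan the three bands in priority order. If all
 * bands look empty but the qdisc still claims to be non-empty, clear
 * the MISSED/DRAINING bits and retry once to close the race with a
 * concurrent producer.
 */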
746 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
747 {
748 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
749 struct sk_buff *skb = NULL;
750 bool need_retry = true;
751 int band;
752
753 retry:
754 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
755 struct skb_array *q = band2list(priv, band);
756
757 if (__skb_array_empty(q))
758 continue;
759
760 skb = __skb_array_consume(q);
761 }
762 if (likely(skb)) {
763 qdisc_update_stats_at_dequeue(qdisc, skb);
764 } else if (need_retry &&
765 READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
766 /* Delay clearing the STATE_MISSED here to reduce
767 * the overhead of the second spin_trylock() in
768 * qdisc_run_begin() and __netif_schedule() calling
769 * in qdisc_run_end().
770 */
771 clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
772 clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
773
774 /* Make sure dequeuing happens after clearing
775 * STATE_MISSED.
776 */
777 smp_mb__after_atomic();
778
779 need_retry = false;
780
781 goto retry;
782 }
783
784 return skb;
785 }
786
787 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
788 {
789 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
790 struct sk_buff *skb = NULL;
791 int band;
792
793 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
794 struct skb_array *q = band2list(priv, band);
795
796 skb = __skb_array_peek(q);
797 }
798
799 return skb;
800 }
801
802 static void pfifo_fast_reset(struct Qdisc *qdisc)
803 {
804 int i, band;
805 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
806
807 for (band = 0; band < PFIFO_FAST_BANDS; band++) {
808 struct skb_array *q = band2list(priv, band);
809 struct sk_buff *skb;
810
811 /* A NULL ring is possible if the destroy path is taken
812 * because skb_array_init() failed in pfifo_fast_init().
813 */
814 if (!q->ring.queue)
815 continue;
816
817 while ((skb = __skb_array_consume(q)) != NULL)
818 kfree_skb(skb);
819 }
820
821 if (qdisc_is_percpu_stats(qdisc)) {
822 for_each_possible_cpu(i) {
823 struct gnet_stats_queue *q;
824
825 q = per_cpu_ptr(qdisc->cpu_qstats, i);
826 q->backlog = 0;
827 q->qlen = 0;
828 }
829 }
830 }
831
832 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
833 {
834 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
835
836 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
837 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
838 goto nla_put_failure;
839 return skb->len;
840
841 nla_put_failure:
842 return -1;
843 }
844
845 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
846 struct netlink_ext_ack *extack)
847 {
848 unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
849 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
850 int prio;
851
852 /* guard against zero length rings */
853 if (!qlen)
854 return -EINVAL;
855
856 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
857 struct skb_array *q = band2list(priv, prio);
858 int err;
859
860 err = skb_array_init(q, qlen, GFP_KERNEL);
861 if (err)
862 return -ENOMEM;
863 }
864
865 /* Can by-pass the queue discipline */
866 qdisc->flags |= TCQ_F_CAN_BYPASS;
867 return 0;
868 }
869
870 static void pfifo_fast_destroy(struct Qdisc *sch)
871 {
872 struct pfifo_fast_priv *priv = qdisc_priv(sch);
873 int prio;
874
875 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
876 struct skb_array *q = band2list(priv, prio);
877
878 /* A NULL ring is possible if the destroy path is taken
879 * because skb_array_init() failed in pfifo_fast_init().
880 */
881 if (!q->ring.queue)
882 continue;
883 /* Destroy ring but no need to kfree_skb because a call to
884 * pfifo_fast_reset() has already done that work.
885 */
886 ptr_ring_cleanup(&q->ring, NULL);
887 }
888 }
889
890 static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
891 unsigned int new_len)
892 {
893 struct pfifo_fast_priv *priv = qdisc_priv(sch);
894 struct skb_array *bands[PFIFO_FAST_BANDS];
895 int prio;
896
897 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
898 struct skb_array *q = band2list(priv, prio);
899
900 bands[prio] = q;
901 }
902
903 return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
904 GFP_KERNEL);
905 }
906
907 struct Qdisc_ops pfifo_fast_ops __read_mostly = {
908 .id = "pfifo_fast",
909 .priv_size = sizeof(struct pfifo_fast_priv),
910 .enqueue = pfifo_fast_enqueue,
911 .dequeue = pfifo_fast_dequeue,
912 .peek = pfifo_fast_peek,
913 .init = pfifo_fast_init,
914 .destroy = pfifo_fast_destroy,
915 .reset = pfifo_fast_reset,
916 .dump = pfifo_fast_dump,
917 .change_tx_queue_len = pfifo_fast_change_tx_queue_len,
918 .owner = THIS_MODULE,
919 .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
920 };
921 EXPORT_SYMBOL(pfifo_fast_ops);
922
923 static struct lock_class_key qdisc_tx_busylock;
924
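/* Allocate and minimally initialise a qdisc on the NUMA node of its
 * tx queue; per-CPU stats are allocated only when the ops carry
 * TCQ_F_CPUSTATS.
 */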
925 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
926 const struct Qdisc_ops *ops,
927 struct netlink_ext_ack *extack)
928 {
929 struct Qdisc *sch;
930 unsigned int size = sizeof(*sch) + ops->priv_size;
931 int err = -ENOBUFS;
932 struct net_device *dev;
933
934 if (!dev_queue) {
935 NL_SET_ERR_MSG(extack, "No device queue given");
936 err = -EINVAL;
937 goto errout;
938 }
939
940 dev = dev_queue->dev;
941 sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
942
943 if (!sch)
944 goto errout;
945 __skb_queue_head_init(&sch->gso_skb);
946 __skb_queue_head_init(&sch->skb_bad_txq);
947 qdisc_skb_head_init(&sch->q);
948 gnet_stats_basic_sync_init(&sch->bstats);
949 spin_lock_init(&sch->q.lock);
950
951 if (ops->static_flags & TCQ_F_CPUSTATS) {
952 sch->cpu_bstats =
953 netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
954 if (!sch->cpu_bstats)
955 goto errout1;
956
957 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
958 if (!sch->cpu_qstats) {
959 free_percpu(sch->cpu_bstats);
960 goto errout1;
961 }
962 }
963
964 spin_lock_init(&sch->busylock);
965 lockdep_set_class(&sch->busylock,
966 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
967
968 /* seqlock has the same scope as busylock, for NOLOCK qdiscs */
969 spin_lock_init(&sch->seqlock);
970 lockdep_set_class(&sch->seqlock,
971 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
972
973 sch->ops = ops;
974 sch->flags = ops->static_flags;
975 sch->enqueue = ops->enqueue;
976 sch->dequeue = ops->dequeue;
977 sch->dev_queue = dev_queue;
978 dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL);
979 refcount_set(&sch->refcnt, 1);
980
981 return sch;
982 errout1:
983 kfree(sch);
984 errout:
985 return ERR_PTR(err);
986 }
987
988 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
989 const struct Qdisc_ops *ops,
990 unsigned int parentid,
991 struct netlink_ext_ack *extack)
992 {
993 struct Qdisc *sch;
994
995 if (!try_module_get(ops->owner)) {
996 NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
997 return NULL;
998 }
999
1000 sch = qdisc_alloc(dev_queue, ops, extack);
1001 if (IS_ERR(sch)) {
1002 module_put(ops->owner);
1003 return NULL;
1004 }
1005 sch->parent = parentid;
1006
1007 if (!ops->init || ops->init(sch, NULL, extack) == 0) {
1008 trace_qdisc_create(ops, dev_queue->dev, parentid);
1009 return sch;
1010 }
1011
1012 qdisc_put(sch);
1013 return NULL;
1014 }
1015 EXPORT_SYMBOL(qdisc_create_dflt);
1016
1017 /* Under qdisc_lock(qdisc) and BH! */
1018
1019 void qdisc_reset(struct Qdisc *qdisc)
1020 {
1021 const struct Qdisc_ops *ops = qdisc->ops;
1022
1023 trace_qdisc_reset(qdisc);
1024
1025 if (ops->reset)
1026 ops->reset(qdisc);
1027
1028 __skb_queue_purge(&qdisc->gso_skb);
1029 __skb_queue_purge(&qdisc->skb_bad_txq);
1030
1031 qdisc->q.qlen = 0;
1032 qdisc->qstats.backlog = 0;
1033 }
1034 EXPORT_SYMBOL(qdisc_reset);
1035
1036 void qdisc_free(struct Qdisc *qdisc)
1037 {
1038 if (qdisc_is_percpu_stats(qdisc)) {
1039 free_percpu(qdisc->cpu_bstats);
1040 free_percpu(qdisc->cpu_qstats);
1041 }
1042
1043 kfree(qdisc);
1044 }
1045
1046 static void qdisc_free_cb(struct rcu_head *head)
1047 {
1048 struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1049
1050 qdisc_free(q);
1051 }
1052
1053 static void qdisc_destroy(struct Qdisc *qdisc)
1054 {
1055 const struct Qdisc_ops *ops = qdisc->ops;
1056
1057 #ifdef CONFIG_NET_SCHED
1058 qdisc_hash_del(qdisc);
1059
1060 qdisc_put_stab(rtnl_dereference(qdisc->stab));
1061 #endif
1062 gen_kill_estimator(&qdisc->rate_est);
1063
1064 qdisc_reset(qdisc);
1065
1066 if (ops->destroy)
1067 ops->destroy(qdisc);
1068
1069 module_put(ops->owner);
1070 dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker);
1071
1072 trace_qdisc_destroy(qdisc);
1073
1074 call_rcu(&qdisc->rcu, qdisc_free_cb);
1075 }
1076
1077 void qdisc_put(struct Qdisc *qdisc)
1078 {
1079 if (!qdisc)
1080 return;
1081
1082 if (qdisc->flags & TCQ_F_BUILTIN ||
1083 !refcount_dec_and_test(&qdisc->refcnt))
1084 return;
1085
1086 qdisc_destroy(qdisc);
1087 }
1088 EXPORT_SYMBOL(qdisc_put);
1089
1090 /* Version of qdisc_put() that is called with the rtnl mutex unlocked.
1091 * Intended as an optimization, this function only takes the rtnl lock if
1092 * the qdisc reference counter reaches zero.
1093 */
1094
1095 void qdisc_put_unlocked(struct Qdisc *qdisc)
1096 {
1097 if (qdisc->flags & TCQ_F_BUILTIN ||
1098 !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1099 return;
1100
1101 qdisc_destroy(qdisc);
1102 rtnl_unlock();
1103 }
1104 EXPORT_SYMBOL(qdisc_put_unlocked);
1105
1106 /* Attach toplevel qdisc to device queue. */
1107 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1108 struct Qdisc *qdisc)
1109 {
1110 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
1111 spinlock_t *root_lock;
1112
1113 root_lock = qdisc_lock(oqdisc);
1114 spin_lock_bh(root_lock);
1115
1116 /* ... and graft new one */
1117 if (qdisc == NULL)
1118 qdisc = &noop_qdisc;
1119 dev_queue->qdisc_sleeping = qdisc;
1120 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1121
1122 spin_unlock_bh(root_lock);
1123
1124 return oqdisc;
1125 }
1126 EXPORT_SYMBOL(dev_graft_qdisc);
1127
1128 static void shutdown_scheduler_queue(struct net_device *dev,
1129 struct netdev_queue *dev_queue,
1130 void *_qdisc_default)
1131 {
1132 struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1133 struct Qdisc *qdisc_default = _qdisc_default;
1134
1135 if (qdisc) {
1136 rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1137 dev_queue->qdisc_sleeping = qdisc_default;
1138
1139 qdisc_put(qdisc);
1140 }
1141 }
1142
1143 static void attach_one_default_qdisc(struct net_device *dev,
1144 struct netdev_queue *dev_queue,
1145 void *_unused)
1146 {
1147 struct Qdisc *qdisc;
1148 const struct Qdisc_ops *ops = default_qdisc_ops;
1149
1150 if (dev->priv_flags & IFF_NO_QUEUE)
1151 ops = &noqueue_qdisc_ops;
1152 else if (dev->type == ARPHRD_CAN)
1153 ops = &pfifo_fast_ops;
1154
1155 qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1156 if (!qdisc)
1157 return;
1158
1159 if (!netif_is_multiqueue(dev))
1160 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1161 dev_queue->qdisc_sleeping = qdisc;
1162 }
1163
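/* Choose the device's default qdisc: per-txq defaults for single-queue
 * or IFF_NO_QUEUE devices, an mq root otherwise; if that setup failed
 * (dev->qdisc is still noop), fall back to noqueue.
 */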
1164 static void attach_default_qdiscs(struct net_device *dev)
1165 {
1166 struct netdev_queue *txq;
1167 struct Qdisc *qdisc;
1168
1169 txq = netdev_get_tx_queue(dev, 0);
1170
1171 if (!netif_is_multiqueue(dev) ||
1172 dev->priv_flags & IFF_NO_QUEUE) {
1173 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1174 qdisc = txq->qdisc_sleeping;
1175 rcu_assign_pointer(dev->qdisc, qdisc);
1176 qdisc_refcount_inc(qdisc);
1177 } else {
1178 qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1179 if (qdisc) {
1180 rcu_assign_pointer(dev->qdisc, qdisc);
1181 qdisc->ops->attach(qdisc);
1182 }
1183 }
1184 qdisc = rtnl_dereference(dev->qdisc);
1185
1186 /* Detect that default qdisc setup/init failed and fall back to "noqueue" */
1187 if (qdisc == &noop_qdisc) {
1188 netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
1189 default_qdisc_ops->id, noqueue_qdisc_ops.id);
1190 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1191 dev->priv_flags |= IFF_NO_QUEUE;
1192 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1193 qdisc = txq->qdisc_sleeping;
1194 rcu_assign_pointer(dev->qdisc, qdisc);
1195 qdisc_refcount_inc(qdisc);
1196 dev->priv_flags ^= IFF_NO_QUEUE;
1197 }
1198
1199 #ifdef CONFIG_NET_SCHED
1200 if (qdisc != &noop_qdisc)
1201 qdisc_hash_add(qdisc, false);
1202 #endif
1203 }
1204
1205 static void transition_one_qdisc(struct net_device *dev,
1206 struct netdev_queue *dev_queue,
1207 void *_need_watchdog)
1208 {
1209 struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
1210 int *need_watchdog_p = _need_watchdog;
1211
1212 if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1213 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1214
1215 rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1216 if (need_watchdog_p) {
1217 WRITE_ONCE(dev_queue->trans_start, 0);
1218 *need_watchdog_p = 1;
1219 }
1220 }
1221
1222 void dev_activate(struct net_device *dev)
1223 {
1224 int need_watchdog;
1225
1226 /* If no queueing discipline is attached to the device,
1227 * create a default one for devices that need queueing,
1228 * and the noqueue qdisc for virtual interfaces.
1229 */
1230
1231 if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
1232 attach_default_qdiscs(dev);
1233
1234 if (!netif_carrier_ok(dev))
1235 /* Delay activation until next carrier-on event */
1236 return;
1237
1238 need_watchdog = 0;
1239 netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1240 if (dev_ingress_queue(dev))
1241 transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1242
1243 if (need_watchdog) {
1244 netif_trans_update(dev);
1245 dev_watchdog_up(dev);
1246 }
1247 }
1248 EXPORT_SYMBOL(dev_activate);
1249
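/* Deactivation sequence (see dev_deactivate_many() below): mark each
 * qdisc DEACTIVATED and swap in noop_qdisc, wait for in-flight
 * xmit/enqueue callers via synchronize_net(), reset every qdisc, then
 * poll until none is still running or scheduled.
 */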
1250 static void qdisc_deactivate(struct Qdisc *qdisc)
1251 {
1252 if (qdisc->flags & TCQ_F_BUILTIN)
1253 return;
1254
1255 set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1256 }
1257
1258 static void dev_deactivate_queue(struct net_device *dev,
1259 struct netdev_queue *dev_queue,
1260 void *_qdisc_default)
1261 {
1262 struct Qdisc *qdisc_default = _qdisc_default;
1263 struct Qdisc *qdisc;
1264
1265 qdisc = rtnl_dereference(dev_queue->qdisc);
1266 if (qdisc) {
1267 qdisc_deactivate(qdisc);
1268 rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1269 }
1270 }
1271
1272 static void dev_reset_queue(struct net_device *dev,
1273 struct netdev_queue *dev_queue,
1274 void *_unused)
1275 {
1276 struct Qdisc *qdisc;
1277 bool nolock;
1278
1279 qdisc = dev_queue->qdisc_sleeping;
1280 if (!qdisc)
1281 return;
1282
1283 nolock = qdisc->flags & TCQ_F_NOLOCK;
1284
1285 if (nolock)
1286 spin_lock_bh(&qdisc->seqlock);
1287 spin_lock_bh(qdisc_lock(qdisc));
1288
1289 qdisc_reset(qdisc);
1290
1291 spin_unlock_bh(qdisc_lock(qdisc));
1292 if (nolock) {
1293 clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
1294 clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
1295 spin_unlock_bh(&qdisc->seqlock);
1296 }
1297 }
1298
1299 static bool some_qdisc_is_busy(struct net_device *dev)
1300 {
1301 unsigned int i;
1302
1303 for (i = 0; i < dev->num_tx_queues; i++) {
1304 struct netdev_queue *dev_queue;
1305 spinlock_t *root_lock;
1306 struct Qdisc *q;
1307 int val;
1308
1309 dev_queue = netdev_get_tx_queue(dev, i);
1310 q = dev_queue->qdisc_sleeping;
1311
1312 root_lock = qdisc_lock(q);
1313 spin_lock_bh(root_lock);
1314
1315 val = (qdisc_is_running(q) ||
1316 test_bit(__QDISC_STATE_SCHED, &q->state));
1317
1318 spin_unlock_bh(root_lock);
1319
1320 if (val)
1321 return true;
1322 }
1323 return false;
1324 }
1325
1326 /**
1327 * dev_deactivate_many - deactivate transmissions on several devices
1328 * @head: list of devices to deactivate
1329 *
1330 * This function returns only when all outstanding transmissions
1331 * have completed, unless all devices are in dismantle phase.
1332 */
1333 void dev_deactivate_many(struct list_head *head)
1334 {
1335 struct net_device *dev;
1336
1337 list_for_each_entry(dev, head, close_list) {
1338 netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1339 &noop_qdisc);
1340 if (dev_ingress_queue(dev))
1341 dev_deactivate_queue(dev, dev_ingress_queue(dev),
1342 &noop_qdisc);
1343
1344 dev_watchdog_down(dev);
1345 }
1346
1347 /* Wait for outstanding qdisc-less dev_queue_xmit calls or
1348 * outstanding qdisc enqueuing calls.
1349 * This is avoided if all devices are in dismantle phase:
1350 * the caller will call synchronize_net() for us.
1351 */
1352 synchronize_net();
1353
1354 list_for_each_entry(dev, head, close_list) {
1355 netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1356
1357 if (dev_ingress_queue(dev))
1358 dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1359 }
1360
1361 /* Wait for outstanding qdisc_run calls. */
1362 list_for_each_entry(dev, head, close_list) {
1363 while (some_qdisc_is_busy(dev)) {
1364 /* wait_event() would avoid this sleep-loop but would
1365 * require expensive checks in the fast paths of packet
1366 * processing which isn't worth it.
1367 */
1368 schedule_timeout_uninterruptible(1);
1369 }
1370 }
1371 }
1372
1373 void dev_deactivate(struct net_device *dev)
1374 {
1375 LIST_HEAD(single);
1376
1377 list_add(&dev->close_list, &single);
1378 dev_deactivate_many(&single);
1379 list_del(&single);
1380 }
1381 EXPORT_SYMBOL(dev_deactivate);
1382
1383 static int qdisc_change_tx_queue_len(struct net_device *dev,
1384 struct netdev_queue *dev_queue)
1385 {
1386 struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1387 const struct Qdisc_ops *ops = qdisc->ops;
1388
1389 if (ops->change_tx_queue_len)
1390 return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1391 return 0;
1392 }
1393
1394 void dev_qdisc_change_real_num_tx(struct net_device *dev,
1395 unsigned int new_real_tx)
1396 {
1397 struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
1398
1399 if (qdisc->ops->change_real_num_tx)
1400 qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
1401 }
1402
1403 void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
1404 {
1405 #ifdef CONFIG_NET_SCHED
1406 struct net_device *dev = qdisc_dev(sch);
1407 struct Qdisc *qdisc;
1408 unsigned int i;
1409
1410 for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
1411 qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
1412 /* Only update the default qdiscs we created;
1413 * qdiscs with handles are always hashed.
1414 */
1415 if (qdisc != &noop_qdisc && !qdisc->handle)
1416 qdisc_hash_del(qdisc);
1417 }
1418 for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
1419 qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
1420 if (qdisc != &noop_qdisc && !qdisc->handle)
1421 qdisc_hash_add(qdisc, false);
1422 }
1423 #endif
1424 }
1425 EXPORT_SYMBOL(mq_change_real_num_tx);
1426
1427 int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1428 {
1429 bool up = dev->flags & IFF_UP;
1430 unsigned int i;
1431 int ret = 0;
1432
1433 if (up)
1434 dev_deactivate(dev);
1435
1436 for (i = 0; i < dev->num_tx_queues; i++) {
1437 ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1438
1439 /* TODO: revert changes on a partial failure */
1440 if (ret)
1441 break;
1442 }
1443
1444 if (up)
1445 dev_activate(dev);
1446 return ret;
1447 }
1448
1449 static void dev_init_scheduler_queue(struct net_device *dev,
1450 struct netdev_queue *dev_queue,
1451 void *_qdisc)
1452 {
1453 struct Qdisc *qdisc = _qdisc;
1454
1455 rcu_assign_pointer(dev_queue->qdisc, qdisc);
1456 dev_queue->qdisc_sleeping = qdisc;
1457 }
1458
1459 void dev_init_scheduler(struct net_device *dev)
1460 {
1461 rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1462 netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1463 if (dev_ingress_queue(dev))
1464 dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1465
1466 timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1467 }
1468
1469 void dev_shutdown(struct net_device *dev)
1470 {
1471 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1472 if (dev_ingress_queue(dev))
1473 shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1474 qdisc_put(rtnl_dereference(dev->qdisc));
1475 rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1476
1477 WARN_ON(timer_pending(&dev->watchdog_timer));
1478 }
1479
1480 /**
1481 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
1482 * @rate: Rate to compute reciprocal division values of
1483 * @mult: Multiplier for reciprocal division
1484 * @shift: Shift for reciprocal division
1485 *
1486 * The multiplier and shift for reciprocal division by rate are stored
1487 * in mult and shift.
1488 *
1489 * The deal here is to replace a divide by a reciprocal one
1490 * in fast path (a reciprocal divide is a multiply and a shift)
1491 *
1492 * Normal formula would be :
1493 * time_in_ns = (NSEC_PER_SEC * len) / rate_bps
1494 *
1495 * We compute mult/shift to use instead :
1496 * time_in_ns = (len * mult) >> shift;
1497 *
1498 * We try to get the highest possible mult value for accuracy,
1499 * but have to make sure no overflows will ever happen.
1500 *
1501 * reciprocal_value() is not used here because it doesn't handle 64-bit values.
1502 */
1503 static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
1504 {
1505 u64 factor = NSEC_PER_SEC;
1506
1507 *mult = 1;
1508 *shift = 0;
1509
1510 if (rate <= 0)
1511 return;
1512
1513 for (;;) {
1514 *mult = div64_u64(factor, rate);
1515 if (*mult & (1U << 31) || factor & (1ULL << 63))
1516 break;
1517 factor <<= 1;
1518 (*shift)++;
1519 }
1520 }
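/* Example: with rate = 125000000 bytes/s (1 Gbit/s), the exact cost is
 * NSEC_PER_SEC / rate = 8 ns per byte, so the loop above ends with a
 * (mult, shift) pair for which (len * mult) >> shift ~= len * 8.
 */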
1521
1522 void psched_ratecfg_precompute(struct psched_ratecfg *r,
1523 const struct tc_ratespec *conf,
1524 u64 rate64)
1525 {
1526 memset(r, 0, sizeof(*r));
1527 r->overhead = conf->overhead;
1528 r->mpu = conf->mpu;
1529 r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1530 r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1531 psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
1532 }
1533 EXPORT_SYMBOL(psched_ratecfg_precompute);
1534
1535 void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
1536 {
1537 r->rate_pkts_ps = pktrate64;
1538 psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
1539 }
1540 EXPORT_SYMBOL(psched_ppscfg_precompute);
1541
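/* A mini_Qdisc_pair holds two buffers that are swapped RCU-style, so
 * clsact/ingress classifier updates never modify the buffer that
 * concurrent readers may still be using.
 */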
1542 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1543 struct tcf_proto *tp_head)
1544 {
1545 /* Protected with chain0->filter_chain_lock.
1546 * Can't access chain directly because tp_head can be NULL.
1547 */
1548 struct mini_Qdisc *miniq_old =
1549 rcu_dereference_protected(*miniqp->p_miniq, 1);
1550 struct mini_Qdisc *miniq;
1551
1552 if (!tp_head) {
1553 RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1554 } else {
1555 miniq = miniq_old != &miniqp->miniq1 ?
1556 &miniqp->miniq1 : &miniqp->miniq2;
1557
1558 /* We need to make sure that readers won't see the miniq
1559 * we are about to modify. So ensure that at least one RCU
1560 * grace period has elapsed since the miniq was made
1561 * inactive.
1562 */
1563 if (IS_ENABLED(CONFIG_PREEMPT_RT))
1564 cond_synchronize_rcu(miniq->rcu_state);
1565 else if (!poll_state_synchronize_rcu(miniq->rcu_state))
1566 synchronize_rcu_expedited();
1567
1568 miniq->filter_list = tp_head;
1569 rcu_assign_pointer(*miniqp->p_miniq, miniq);
1570 }
1571
1572 if (miniq_old)
1573 /* This is the counterpart of the rcu sync above. We need to
1574 * block a potential new user of miniq_old until all readers
1575 * have stopped seeing it.
1576 */
1577 miniq_old->rcu_state = start_poll_synchronize_rcu();
1578 }
1579 EXPORT_SYMBOL(mini_qdisc_pair_swap);
1580
1581 void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1582 struct tcf_block *block)
1583 {
1584 miniqp->miniq1.block = block;
1585 miniqp->miniq2.block = block;
1586 }
1587 EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1588
1589 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1590 struct mini_Qdisc __rcu **p_miniq)
1591 {
1592 miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1593 miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1594 miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1595 miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1596 miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
1597 miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
1598 miniqp->p_miniq = p_miniq;
1599 }
1600 EXPORT_SYMBOL(mini_qdisc_pair_init);
1601