/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications to data that participate in scheduling
   must additionally be protected by the dev->queue_lock spinlock.

   The idea is the following:
   - enqueue and dequeue are serialized via the top-level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock_bh(qdisc_tree_lock),
     and this lock is used only in process context.
   - updates to the tree are made under the rtnl semaphore or
     from softirq context (the __qdisc_destroy rcu callback),
     hence this lock needs local bh disabling.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
DEFINE_RWLOCK(qdisc_tree_lock);

void qdisc_lock_tree(struct net_device *dev)
{
	write_lock_bh(&qdisc_tree_lock);
	spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock_bh(&dev->queue_lock);
	write_unlock_bh(&qdisc_tree_lock);
}
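
/* A minimal usage sketch (illustration only): configuration paths take
 * both locks in the documented order via the helpers above:
 *
 *	qdisc_lock_tree(dev);
 *	... modify dev->qdisc / dev->qdisc_list ...
 *	qdisc_unlock_tree(dev);
 */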

/*
   dev->queue_lock serializes queue accesses for this device
   AND the dev->qdisc pointer itself.

   dev->xmit_lock serializes accesses to the device driver.

   dev->queue_lock and dev->xmit_lock are mutually exclusive:
   if one is grabbed, the other must be free.
 */


/* Kick the device.
   Note that this procedure can be called by a watchdog timer, so
   we do not check the dev->tbusy flag here.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
            <0  - queue is not empty.  Device is throttled, if dev->tbusy != 0.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
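
/* A sketch of the typical caller (cf. qdisc_run() in <net/pkt_sched.h>):
 * the device is kicked until its queue stops or qdisc_restart() stops
 * reporting that more packets may follow:
 *
 *	while (!netif_queue_stopped(dev) &&
 *	       qdisc_restart(dev) < 0)
 *		;
 */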

int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if ((skb = q->dequeue(q)) != NULL) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);
		/*
		 * When the driver has LLTX set it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do a trylock, like here, too; in case
		 * of lock contention it should return NETDEV_TX_LOCKED
		 * and the packet will be requeued.
		 */
		if (!nolock) {
			if (!spin_trylock(&dev->xmit_lock)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be a transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking the xmit owner and drop the
				   packet when a dead loop is detected.
				 */
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					return -1;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
			/* Remember that the driver is grabbed by us. */
			dev->xmit_lock_owner = smp_processor_id();
		}

		{
			/* And release the queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);

				ret = dev->hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						dev->xmit_lock_owner = -1;
						spin_unlock(&dev->xmit_lock);
					}
					spin_lock(&dev->queue_lock);
					return -1;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				dev->xmit_lock_owner = -1;
				spin_unlock(&dev->xmit_lock);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in the following cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (e.g. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		q->ops->requeue(skb, q);
		netif_schedule(dev);
		return 1;
	}
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	spin_lock(&dev->xmit_lock);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {

				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
				dev_hold(dev);
		}
	}
	spin_unlock(&dev->xmit_lock);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
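		/* mod_timer() returns 0 when the timer was not already
		   pending; take a device reference here, to be dropped
		   by the dev_put() at the end of dev_watchdog(). */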
		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	__netdev_watchdog_up(dev);
	spin_unlock_bh(&dev->xmit_lock);
}

static void dev_watchdog_down(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	spin_unlock_bh(&dev->xmit_lock);
}

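/* The device has regained carrier: clear __LINK_STATE_NOCARRIER, fire a
   linkwatch event if the state actually changed, and restart the tx
   watchdog if the device is running. */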
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

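/* The device has lost carrier: set __LINK_STATE_NOCARRIER and fire a
   linkwatch event if the state actually changed. */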
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
};

static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */
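/* For illustration, using the map above: TC_PRIO_INTERACTIVE (6) and
   TC_PRIO_CONTROL (7) fall into band 0, which is dequeued first;
   TC_PRIO_BESTEFFORT (0) falls into band 1; TC_PRIO_BULK (2) into
   band 2, served last. */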

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	memset(p, 0, size);
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;
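	/* For illustration (assuming QDISC_ALIGNTO == 32, per the comment
	   above): if kmalloc() returned p ending in 0xe8, sch is rounded
	   up to the next 32-byte boundary (...0x100) and sch->padded is
	   0x18, which __qdisc_destroy() uses to recover p for kfree(). */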

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	sch->stats_lock = &dev->queue_lock;
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(-err);
}

struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* This is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
	write_lock(&qdisc_tree_lock);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);
	write_unlock(&qdisc_tree_lock);
	module_put(ops->owner);

	dev_put(qdisc->dev);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct list_head cql = LIST_HEAD_INIT(cql);
	struct Qdisc *cq, *q, *n;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	if (!list_empty(&qdisc->list)) {
		if (qdisc->ops->cl_ops == NULL)
			list_del(&qdisc->list);
		else
			list_move(&qdisc->list, &cql);
	}

	/* unlink inner qdiscs from dev->qdisc_list immediately */
	list_for_each_entry(cq, &cql, list)
		list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
			if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
				if (q->ops->cl_ops == NULL)
					list_del_init(&q->list);
				else
					list_move_tail(&q->list, &cql);
			}
	list_for_each_entry_safe(cq, n, &cql, list)
		list_del_init(&cq->list);

	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to the device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for virtual
	   interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			write_lock_bh(&qdisc_tree_lock);
			list_add_tail(&qdisc->list, &dev->qdisc_list);
			write_unlock_bh(&qdisc_tree_lock);
		} else {
			qdisc = &noqueue_qdisc;
		}
		write_lock_bh(&qdisc_tree_lock);
		dev->qdisc_sleeping = qdisc;
		write_unlock_bh(&qdisc_tree_lock);
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until the next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}

void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	spin_unlock_bh(&dev->queue_lock);

	dev_watchdog_down(dev);

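	/* Wait for any outstanding qdisc_run(): __LINK_STATE_SCHED stays
	   set while the device sits on a CPU's softnet output queue
	   waiting for net_tx_action(). */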
	while (test_bit(__LINK_STATE_SCHED, &dev->state))
		yield();

	spin_unlock_wait(&dev->xmit_lock);
}

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);