#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

#define QDISC_ALIGNTO		64
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}
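
/* The private area of a qdisc starts right after the cache-aligned
 * struct Qdisc.  An illustrative sketch of the resulting access pattern
 * ('struct my_sched_data' is a made-up private type):
 *
 *	sch = qdisc_alloc(dev_queue, ops);	   // reserves QDISC_ALIGN(sizeof(*sch)) + ops->priv_size
 *	struct my_sched_data *q = qdisc_priv(sch); // first byte past the aligned header
 *
 * qdisc_alloc() sizes its allocation so this pointer arithmetic is
 * valid for any qdisc created through the normal path.
 */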

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   A normal IP packet is ~512 bytes, hence:

   0.5Kbyte at 1Mbyte/sec takes 0.5msec, so we need a ~50usec timer
   for 10Mbit ethernet.

   10msec resolution -> usable only below ~50Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we may use an artificial
   clock, evaluated by integration of the network data flow,
   in the most critical places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0
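
/* With PSCHED_SHIFT == 6, one psched tick is 2^6 = 64ns and
 * PSCHED_TICKS_PER_SEC works out to 10^9 >> 6 = 15625000.  The shift
 * stands in for a divide, at the cost of truncating up to 63ns on each
 * ns -> ticks conversion.
 */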

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}
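
/* Bounding the difference tames long idle gaps.  A sketch of how a
 * class-based scheduler might use it ('cl' and its fields are assumed
 * names, not part of this header):
 *
 *	idle = psched_tdiff_bounded(q->now, cl->last, cl->maxidle);
 *
 * Clamping to cl->maxidle keeps a single long gap from swamping an
 * exponentially weighted average of idle time.
 */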

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
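
/* Typical watchdog usage in a rate-limiting qdisc (an illustrative
 * sketch; 'q->watchdog' and 'when' are assumed names):
 *
 *	.init:		qdisc_watchdog_init(&q->watchdog, sch);
 *	.dequeue:	if the head packet is not due yet, arm
 *			qdisc_watchdog_schedule_ns(&q->watchdog, when)
 *			and return NULL;
 *	.reset:		qdisc_watchdog_cancel(&q->watchdog);
 *
 * When the hrtimer fires, the qdisc's queue is rescheduled for
 * transmission, so dequeue runs again at (roughly) the requested time.
 */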

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	/* qdisc_run_begin() succeeds for only one caller at a time, so a
	 * single task drains the qdisc while others merely enqueue.
	 */
	if (qdisc_run_begin(q))
		__qdisc_run(q);
}
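
/* qdisc_run() is called from the transmit path (e.g. __dev_xmit_skb())
 * and from the TX softirq; the run_begin/run_end pairing lets those
 * callers race for the same qdisc safely.
 */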

int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res, bool compat_mode);

static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
	/* We need to take extra care in case the skb came via
	 * a vlan accelerated path. In that case, use skb->vlan_proto,
	 * since the original vlan header was already stripped.
	 */
	if (skb_vlan_tag_present(skb))
		return skb->vlan_proto;
	return skb->protocol;
}
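
/* The classification loop compares this value against each classifier's
 * configured protocol; a sketch of the idea (not the exact code):
 *
 *	if (tp->protocol != tc_skb_protocol(skb) &&
 *	    tp->protocol != htons(ETH_P_ALL))
 *		continue;	// try the next tcf_proto in the chain
 */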

/* Calculate the maximal size of a packet seen by the hard_start_xmit
 * routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
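
/* For a plain Ethernet device, for example, dev->mtu is 1500 and
 * dev->hard_header_len is ETH_HLEN (14), so psched_mtu() returns 1514.
 */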

#endif