#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * This is true for MQ/MQPRIO slaves, or
				      * for a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;
	void			*u32_node;

	struct netdev_queue	*dev_queue;

	struct gnet_stats_rate_est64	rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff		*skb_bad_txq;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}

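/*
 * Usage sketch (illustrative, not a kernel API guarantee): callers such
 * as qdisc_run() treat the pair above as a seqcount-based trylock around
 * the dequeue loop, so only one CPU services a given qdisc at a time:
 *
 *	if (qdisc_run_begin(q)) {
 *		while (transmit_one_skb(q))	// hypothetical helper
 *			;
 *		qdisc_run_end(q);
 *	}
 *	// on failure, another CPU already owns the qdisc; just enqueue
 */
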
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

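/*
 * Usage sketch (illustrative): the dequeue path in sch_generic.c
 * combines the two helpers above to decide whether several packets may
 * be pulled in one go, roughly:
 *
 *	if (qdisc_may_bulk(q) && qdisc_avail_bulklimit(txq) > 0)
 *		// dequeue additional skbs up to the BQL budget
 *
 * Bulking is only safe for TCQ_F_ONETXQUEUE qdiscs, where all skbs are
 * known to target the same txq.
 */
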
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	bool			(*destroy)(struct tcf_proto*, bool);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *, bool);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

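/*
 * Usage sketch (illustrative): a qdisc that keeps private per-packet
 * state in skb->cb overlays its own struct on the data[] area and
 * validates the size on every access; the names below are hypothetical:
 *
 *	struct my_skb_cb {			// hypothetical
 *		u32	enqueue_time;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */
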
static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

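/*
 * Usage sketch (illustrative): classful qdiscs embed a
 * struct Qdisc_class_common at the start of their private class type
 * and resolve classids through the hash; the "my_" names are
 * hypothetical:
 *
 *	static struct my_class *my_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct my_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		return clc ? container_of(clc, struct my_class, common) : NULL;
 *	}
 */
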
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
#else
	return false;
#endif
}

/* Reset all TX qdiscs of a device at or above a given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

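/*
 * Usage sketch (illustrative): the caller owns a to_free list so that
 * dropped skbs can be freed after the root qdisc lock is released,
 * roughly as __dev_xmit_skb() does:
 *
 *	struct sk_buff *to_free = NULL;
 *
 *	spin_lock(root_lock);
 *	rc = qdisc_enqueue(skb, q, &to_free);
 *	spin_unlock(root_lock);
 *	if (unlikely(to_free))
 *		kfree_skb_list(to_free);
 */
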
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

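/*
 * Usage sketch (illustrative): together these helpers implement a
 * simple FIFO, much like sch_fifo.c, e.g. a bfifo-style enqueue:
 *
 *	static int fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				struct sk_buff **to_free)
 *	{
 *		if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
 *			   sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 * with qdisc_dequeue_head() as the matching ->dequeue.
 */
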
/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

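/*
 * Usage sketch (illustrative): a shaping qdisc peeks at the head packet
 * to decide whether it may be sent yet, and only then commits the
 * dequeue, roughly as sch_tbf.c does:
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb && tokens_available(q, skb)) {	// hypothetical check
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 *		...
 *	}
 */
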
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}

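/*
 * Usage sketch (illustrative): classful qdiscs typically call this from
 * their ->graft() operation to swap in a new child and hand back the
 * old one, e.g.:
 *
 *	*old = qdisc_replace(sch, new, &cl->qdisc);
 *	return 0;
 *
 * where cl->qdisc is the class's attached child (the field name varies
 * per qdisc).
 */
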
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

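/*
 * Worked example (illustrative): with cell_log = 3, cell_align = 0 and
 * overhead = 0, a 100-byte packet falls into slot 100 >> 3 = 12, so its
 * transmission time is rtab->data[12], a value precomputed by user
 * space (tc) for the configured rate.  Sizes beyond slot 255 scale the
 * top entry by (slot >> 8) and add the lookup for the low byte.
 */
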
struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

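/*
 * Worked example (illustrative): psched_ratecfg_precompute() picks mult
 * and shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps without a 64-bit division on the
 * fast path.  The ATM branch first accounts for 48-byte payload cells
 * carried in 53-byte frames.
 */
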
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif