#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
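	/* The BUILD_BUG_ON() below guarantees that dst immediately follows
	 * src inside struct flow_keys, so a single memcpy() can fill both.
	 */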
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);

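/* __skb_flow_dissect - walk the packet headers starting at the network
 * offset, filling @flow with addresses, L4 ports, protocol numbers and the
 * transport header offset. Handles 802.1Q/802.1ad VLAN tags, PPPoE sessions
 * and GRE/IPIP/SIT encapsulation along the way. @data may point at a raw
 * buffer of length @hlen; if NULL, the skb's linear data is used. Returns
 * false when a needed header cannot be pulled.
 */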
bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, void *data, int hlen)
{
	int nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
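		/* Only the first fragment carries the L4 header, and all
		 * fragments of a datagram must hash identically, so do not
		 * dissect ports for any fragment.
		 */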
		if (ip_is_fragment(iph))
			ip_proto = 0;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
		__be32 flow_label;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);

		flow_label = ip6_flowlabel(iph);
		if (flow_label) {
			/* Awesome, IPv6 packet has a flow label so we can
			 * use that to represent the ports without any
			 * further dissection.
			 */
			flow->n_proto = proto;
			flow->ip_proto = ip_proto;
			flow->ports = flow_label;
			flow->thoff = (u16)nhoff;

			return true;
		}

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}

	switch (ip_proto) {
	case IPPROTO_GRE: {
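		/* Base GRE header (RFC 2784/2890): 16 bits of flags plus
		 * version, then the encapsulated ethertype. Each optional
		 * checksum, key and sequence field adds 4 more bytes,
		 * accounted for below.
		 */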
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
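			/* ETH_P_TEB (Transparent Ethernet Bridging) means a
			 * full Ethernet header follows the GRE header; skip
			 * it and continue with its inner ethertype.
			 */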
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = __skb_header_pointer(skb, nhoff,
							   sizeof(_eth),
							   data, hlen, &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->n_proto = proto;
	flow->ip_proto = ip_proto;
	flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(__skb_flow_dissect);

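/* hashrnd is seeded lazily, on first use, so that flow hashes are
 * unpredictable across boots; this makes it harder for a remote peer to
 * deliberately construct hash collisions.
 */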
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}

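/* The canonical ordering below makes the hash symmetric: for example,
 * 10.0.0.1:5000 -> 10.0.0.2:80 and 10.0.0.2:80 -> 10.0.0.1:5000 swap to
 * the same (src, dst, ports) tuple and therefore hash to the same value,
 * so both directions of a connection map to the same flow.
 */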
static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
{
	u32 hash;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys->dst < (__force u32)keys->src) ||
	    (((__force u32)keys->dst == (__force u32)keys->src) &&
	     ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
		swap(keys->dst, keys->src);
		swap(keys->port16[0], keys->port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys->dst,
				  (__force u32)keys->src,
				  (__force u32)keys->ports);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	return __flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(flow_hash_from_keys);

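/* Example (sketch): a caller that wants a symmetric flow hash without
 * touching skb->hash can dissect and hash by hand:
 *
 *	struct flow_keys keys;
 *	u32 hash = 0;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		hash = flow_hash_from_keys(&keys);
 */
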
/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets hash in skb to a non-zero hash value
 * on success; zero indicates no valid hash. Sets l4_hash in skb if
 * the hash is a canonical 4-tuple hash over transport ports, and
 * sw_hash to record that the hash was computed in software.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_hash = 1;

	skb->sw_hash = 1;

	skb->hash = __flow_hash_from_keys(&keys);
}
EXPORT_SYMBOL(__skb_get_hash);

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

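/* get_xps_queue - look up the XPS (Transmit Packet Steering) map of the
 * current CPU and pick one of its TX queues, scaling the flow hash over
 * the map length when the CPU maps to more than one queue. Returns -1 if
 * XPS is not configured or the chosen queue is out of range.
 */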
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

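/* __netdev_pick_tx - default TX queue selection: reuse the queue index
 * cached on the socket while it stays valid, unless the skb is marked
 * ooo_okay (reordering is acceptable); otherwise consult XPS and fall
 * back to skb_tx_hash(). The new index is cached on the socket only when
 * a destination entry is set, i.e. for connected sockets.
 */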
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

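/* netdev_pick_tx - select the TX queue for @skb, preferring the driver's
 * ndo_select_queue() callback when one is provided, and capping the result
 * to the device's real queue count unless an accelerated path (@accel_priv)
 * is in use.
 */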
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
429}