#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to:	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}

/**
 * skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: buffer to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 *
 * The function will try to retrieve the ports at offset thoff + poff, where
 * poff is the protocol port offset returned by proto_ports_offset().
 */
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
	int poff = proto_ports_offset(ip_proto);

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = skb_header_pointer(skb, thoff + poff,
					   sizeof(_ports), &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(skb_flow_get_ports);
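/* Illustrative note: for TCP or UDP, proto_ports_offset() is 0, so the
 * __be32 returned above carries the source port in its first two bytes
 * and the destination port in its last two, exactly as on the wire.
 */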
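/* Walk the packet headers and fill @flow: follow VLAN, PPPoE and simple
 * tunnel encapsulations (GRE/IPIP/IPv6-in-IP) until an IPv4 or IPv6
 * header is found, then record addresses, the L4 protocol, the ports
 * and the transport header offset. Returns false when the headers
 * cannot be parsed.
 */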
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		if (ip_is_fragment(iph))
			ip_proto = 0;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}
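	/* Peek into well-known tunnels so the resulting keys describe the
	 * encapsulated flow rather than the tunnel endpoints.
	 */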
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->n_proto = proto;
	flow->ip_proto = ip_proto;
	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
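/* Per-boot random seed for the flow hash, lazily initialized on first
 * use via net_get_random_once() so hash values are unpredictable
 * across boots.
 */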
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}
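/* Keys are put into canonical order before hashing, so (illustrative
 * example) 10.0.0.1:80 -> 10.0.0.2:5555 and 10.0.0.2:5555 -> 10.0.0.1:80
 * produce the same hash value.
 */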
static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
{
	u32 hash;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys->dst < (__force u32)keys->src) ||
	    (((__force u32)keys->dst == (__force u32)keys->src) &&
	     ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
		swap(keys->dst, keys->src);
		swap(keys->port16[0], keys->port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys->dst,
				  (__force u32)keys->src,
				  (__force u32)keys->ports);
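	/* a hash of zero is reserved to signal "no valid hash" to callers */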
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	return __flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(flow_hash_from_keys);

/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to a non-zero hash value
 * on success; zero indicates no valid hash.  Also sets l4_hash in skb
 * if the hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_hash = 1;

	skb->hash = __flow_hash_from_keys(&keys);
}
EXPORT_SYMBOL(__skb_get_hash);

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to use as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

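	/* Scale the 32-bit flow hash uniformly onto
	 * [qoffset, qoffset + qcount) with a multiply-and-shift,
	 * avoiding a modulo.
	 */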
	return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, which can thereby
 * truncate packets dynamically and analyze headers only, without having
 * to push the actual payload to user space.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

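/* Pick a Tx queue from the device's XPS (Transmit Packet Steering) map
 * for the current CPU, if one is configured; returns -1 otherwise.
 */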
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[
				    ((u64)skb_get_hash(skb) * map->len) >> 32];

			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
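/* Default queue selection: reuse the socket's cached queue when it is
 * still valid, otherwise consult XPS and fall back to the flow hash;
 * cache the result on the socket when a destination is attached.
 */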
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
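/* Resolve the Tx queue for @skb: honour the driver's ndo_select_queue()
 * when present, clamp the result to the device's real queue count
 * (unless accel_priv is set), and record the mapping in the skb.
 */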
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
399}