/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

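/* A recirc or sample action that cannot run inline is queued as a deferred
 * action: the skb, a copy of the flow key and (for sample) the nested action
 * list are stashed and replayed once the current action list has finished,
 * keeping execution iterative rather than recursive.
 */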
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

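/* Per-CPU FIFO of deferred actions.  Its fixed size bounds how much work one
 * packet can defer; on overflow the extra action is dropped with a
 * rate-limited warning (see the callers of add_deferred_actions()).
 */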
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

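/* Rewrite the Ethernet type field.  For CHECKSUM_COMPLETE skbs the old and
 * new values are folded into skb->csum so it still matches the modified
 * packet contents.
 */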
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

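/* Push an MPLS label stack entry between the L2 header and the payload:
 * the MAC header is moved down by MPLS_HLEN, the new LSE is written into the
 * gap and the ethertype is switched to the MPLS one.  The flow key is
 * invalidated since the packet no longer matches the key extracted on
 * receive.
 */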
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;

	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

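/* Output callback passed to the IPv4/IPv6 fragmentation code.  It runs once
 * per generated fragment, restores the L2 state saved by prepare_frag() in
 * the per-CPU ovs_frag_data_storage and sends the fragment out of the
 * original vport.
 */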
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

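/* The fragmentation code reads its MTU from a dst_entry, but the packet being
 * output here carries no route.  A minimal on-stack dst backed by these ops
 * stands in and simply reports the output device's MTU.
 */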
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops) {
			goto err;
		}

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

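/* Resolve 'out_port' and transmit 'skb' on it.  Any pending truncation
 * (OVS_CB(skb)->cutlen) is applied first; a packet larger than the stored MRU
 * is fragmented if the port MTU allows it and dropped otherwise.
 */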
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ETH_HLEN)
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ETH_HLEN);
		}

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

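/* Build a dp_upcall_info from the nested attributes of an
 * OVS_ACTION_ATTR_USERSPACE action (Netlink PID, userdata, optional egress
 * tunnel info and actions) and deliver the packet to userspace through
 * ovs_dp_upcall().
 */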
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

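/* Execute a sample action: the packet is processed further only with the
 * configured probability.  The common case of a (possibly truncated) single
 * userspace action is handled inline; any other action list is cloned and
 * deferred so that it runs after the current action list completes.
 */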
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action, or having a truncate action followed by a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

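/* Apply an OVS_ACTION_ATTR_SET_MASKED/SET_TO_MASKED action: the chosen field
 * is rewritten as (old & ~mask) | (value & mask) and the software flow key is
 * updated to match the packet.  Conntrack attributes cannot be set this way
 * and yield -EINVAL.
 */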
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

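/* Queue a recirculation.  The flow key is recomputed if a previous action
 * invalidated it, the skb is cloned when the recirc action is not the last
 * one in the list, and the packet is pushed onto the deferred-action FIFO
 * with the new recirc_id so the datapath re-processes it after this action
 * list finishes.
 */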
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

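/* Top-level entry point for action execution.  A per-CPU counter tracks how
 * deeply ovs_execute_actions() is nested on this CPU; beyond
 * ovs_recursion_limit the packet is dropped with a rate-limited message.
 * Deferred actions are flushed only when the outermost invocation returns.
 */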
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	static const int ovs_recursion_limit = 5;
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > ovs_recursion_limit)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}