/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len);

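/* A deferred action holds everything needed to re-run actions on a packet
 * once the current action list completes: the skb, the actions to apply
 * (NULL means "recirculate through flow lookup"), and a private copy of
 * the flow key taken at the time of deferral.
 */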
struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};

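/* Per-CPU scratch space for fragmentation: the L2 header and skb metadata
 * are saved here by prepare_frag() before the IP layer fragments the
 * payload, then restored onto each fragment by ovs_vport_output().
 * MAX_L2_LEN allows for an Ethernet header plus a VLAN tag and up to
 * three MPLS labels.
 */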
#define MAX_L2_LEN      (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
        unsigned long dst;
        struct vport *vport;
        struct ovs_skb_cb cb;
        __be16 inner_protocol;
        __u16 vlan_tci;
        __be16 vlan_proto;
        unsigned int l2_len;
        u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

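/* Per-CPU FIFO of deferred actions. Note that action_fifo_put() stops at
 * DEFERRED_ACTION_FIFO_SIZE - 1 entries, and the FIFO is drained and reset
 * by process_deferred_actions() once the outermost action list finishes.
 */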
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}

/* Returns a pointer to the new FIFO entry, or NULL if the FIFO is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                                    const struct sw_flow_key *key,
                                                    const struct nlattr *attr)
{
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
                da->actions = attr;
                da->pkt_key = *key;
        }

        return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
        key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
        return !!key->eth.type;
}

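/* Push an MPLS label stack entry: the existing L2 header is moved to make
 * room for the new LSE between it and the L3 payload, and the Ethernet
 * type is rewritten to the MPLS ethertype.
 */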
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
{
        __be32 *new_mpls_lse;
        struct ethhdr *hdr;

        /* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
        if (skb->encapsulation)
                return -ENOTSUPP;

        if (skb_cow_head(skb, MPLS_HLEN) < 0)
                return -ENOMEM;

        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);
        skb_reset_mac_header(skb);

        new_mpls_lse = (__be32 *)skb_mpls_header(skb);
        *new_mpls_lse = mpls->mpls_lse;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
                                                             MPLS_HLEN, 0));

        hdr = eth_hdr(skb);
        hdr->h_proto = mpls->mpls_ethertype;

        if (!skb->inner_protocol)
                skb_set_inner_protocol(skb, skb->protocol);
        skb->protocol = mpls->mpls_ethertype;

        invalidate_flow_key(key);
        return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
{
        struct ethhdr *hdr;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);

        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);

        /* skb_mpls_header() is used to locate the ethertype
         * field correctly in the presence of VLAN tags.
         */
        hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
        hdr->h_proto = ethertype;
        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;

        invalidate_flow_key(key);
        return 0;
}

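/* Masked write of the top MPLS label stack entry. For CHECKSUM_COMPLETE
 * skbs the complete checksum is patched in place: folding in ~old and new
 * is equivalent to subtracting the old 32-bit word and adding the new one
 * in one's-complement arithmetic.
 */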
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const __be32 *mpls_lse, const __be32 *mask)
{
        __be32 *stack;
        __be32 lse;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        stack = (__be32 *)skb_mpls_header(skb);
        lse = OVS_MASKED(*stack, *mpls_lse, *mask);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be32 diff[] = { ~(*stack), lse };

                skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                          ~skb->csum);
        }

        *stack = lse;
        flow_key->mpls.top_lse = lse;
        return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_vlan_pop(skb);
        if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = 0;
        return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
{
        if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = vlan->vlan_tci;
        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
        u16 *dst = (u16 *)dst_;
        const u16 *src = (const u16 *)src_;
        const u16 *mask = (const u16 *)mask_;

        OVS_SET_MASKED(dst[0], src[0], mask[0]);
        OVS_SET_MASKED(dst[1], src[1], mask[1]);
        OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
                        const struct ovs_key_ethernet *key,
                        const struct ovs_key_ethernet *mask)
{
        int err;

        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
                               mask->eth_src);
        ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
                               mask->eth_dst);

        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
        ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
        return 0;
}

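/* TCP and UDP checksums cover an IPv4 pseudo-header, so rewriting an IP
 * address requires a matching L4 checksum fixup. Non-first fragments carry
 * no L4 header and are skipped; a zero UDP checksum means "no checksum",
 * so it is only updated when present (or when it will be finalized later
 * via CHECKSUM_PARTIAL).
 */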
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
                                  __be32 addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->frag_off & htons(IP_OFFSET))
                return;

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 addr, new_addr, true);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, true);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, true);
        }
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
                           const __be32 mask[4], __be32 masked[4])
{
        masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
        masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
        masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
        masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
        /* Bits 21-24 are always unmasked, so this retains their values. */
        OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
        OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
        OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
                       u8 mask)
{
        new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv4 *key,
                    const struct ovs_key_ipv4 *mask)
{
        struct iphdr *nh;
        __be32 new_addr;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        /* Setting IP addresses is typically only a side effect of
         * matching on them in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (mask->ipv4_src) {
                new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

                if (unlikely(new_addr != nh->saddr)) {
                        set_ip_addr(skb, nh, &nh->saddr, new_addr);
                        flow_key->ipv4.addr.src = new_addr;
                }
        }
        if (mask->ipv4_dst) {
                new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

                if (unlikely(new_addr != nh->daddr)) {
                        set_ip_addr(skb, nh, &nh->daddr, new_addr);
                        flow_key->ipv4.addr.dst = new_addr;
                }
        }
        if (mask->ipv4_tos) {
                ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
                flow_key->ip.tos = nh->tos;
        }
        if (mask->ipv4_ttl) {
                set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
                flow_key->ip.ttl = nh->ttl;
        }

        return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
        return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv6 *key,
                    const struct ovs_key_ipv6 *mask)
{
        struct ipv6hdr *nh;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);

        /* Setting IP addresses is typically only a side effect of
         * matching on them in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
                __be32 *saddr = (__be32 *)&nh->saddr;
                __be32 masked[4];

                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
                        set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
                }
        }
        if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;
                __be32 *daddr = (__be32 *)&nh->daddr;
                __be32 masked[4];

                mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

                if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
                        if (ipv6_ext_hdr(nh->nexthdr))
                                recalc_csum = (ipv6_find_hdr(skb, &offset,
                                                             NEXTHDR_ROUTING,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);

                        set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
                }
        }
        if (mask->ipv6_tclass) {
                ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
                flow_key->ip.tos = ipv6_get_dsfield(nh);
        }
        if (mask->ipv6_label) {
                set_ipv6_fl(nh, ntohl(key->ipv6_label),
                            ntohl(mask->ipv6_label));
                flow_key->ipv6.label =
                        *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        }
        if (mask->ipv6_hlimit) {
                OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
                               mask->ipv6_hlimit);
                flow_key->ip.ttl = nh->hop_limit;
        }
        return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, false);
        *port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_udp *key,
                   const struct ovs_key_udp *mask)
{
        struct udphdr *uh;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        /* Either of the masks is non-zero, so do not bother checking them. */
        src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
        dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                if (likely(src != uh->source)) {
                        set_tp_port(skb, &uh->source, src, &uh->check);
                        flow_key->tp.src = src;
                }
                if (likely(dst != uh->dest)) {
                        set_tp_port(skb, &uh->dest, dst, &uh->check);
                        flow_key->tp.dst = dst;
                }

                if (unlikely(!uh->check))
                        uh->check = CSUM_MANGLED_0;
        } else {
                uh->source = src;
                uh->dest = dst;
                flow_key->tp.src = src;
                flow_key->tp.dst = dst;
        }

        skb_clear_hash(skb);

        return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_tcp *key,
                   const struct ovs_key_tcp *mask)
{
        struct tcphdr *th;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
        if (likely(src != th->source)) {
                set_tp_port(skb, &th->source, src, &th->check);
                flow_key->tp.src = src;
        }
        dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
        if (likely(dst != th->dest)) {
                set_tp_port(skb, &th->dest, dst, &th->check);
                flow_key->tp.dst = dst;
        }
        skb_clear_hash(skb);

        return 0;
}

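/* SCTP uses a CRC32c checksum, which is not updated incrementally here:
 * the packet is summed before and after the rewrite. XOR-ing the old,
 * old-correct and new checksums carries any pre-existing checksum error
 * through to the rewritten packet instead of accidentally repairing it.
 */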
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_sctp *key,
                    const struct ovs_key_sctp *mask)
{
        unsigned int sctphoff = skb_transport_offset(skb);
        struct sctphdr *sh;
        __le32 old_correct_csum, new_csum, old_csum;
        int err;

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        old_csum = sh->checksum;
        old_correct_csum = sctp_compute_cksum(skb, sctphoff);

        sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
        sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

        new_csum = sctp_compute_cksum(skb, sctphoff);

        /* Carry any checksum errors through. */
        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

        skb_clear_hash(skb);
        flow_key->tp.src = sh->source;
        flow_key->tp.dst = sh->dest;

        return 0;
}

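/* Output callback handed to the IP fragmentation code: restores the dst,
 * OVS metadata and L2 header saved by prepare_frag() onto each fragment,
 * then sends it out through the saved vport.
 */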
static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
{
        struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
        struct vport *vport = data->vport;

        if (skb_cow_head(skb, data->l2_len) < 0) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        __skb_dst_copy(skb, data->dst);
        *OVS_CB(skb) = data->cb;
        skb->inner_protocol = data->inner_protocol;
        skb->vlan_tci = data->vlan_tci;
        skb->vlan_proto = data->vlan_proto;

        /* Reconstruct the MAC header. */
        skb_push(skb, data->l2_len);
        memcpy(skb->data, &data->l2_data, data->l2_len);
        ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
        skb_reset_mac_header(skb);

        ovs_vport_send(vport, skb);
        return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
        return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
        .family = AF_UNSPEC,
        .mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
        unsigned int hlen = skb_network_offset(skb);
        struct ovs_frag_data *data;

        data = this_cpu_ptr(&ovs_frag_data_storage);
        data->dst = skb->_skb_refdst;
        data->vport = vport;
        data->cb = *OVS_CB(skb);
        data->inner_protocol = skb->inner_protocol;
        data->vlan_tci = skb->vlan_tci;
        data->vlan_proto = skb->vlan_proto;
        data->l2_len = hlen;
        memcpy(&data->l2_data, skb->data, hlen);

        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        skb_pull(skb, hlen);
}

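/* Fragment an over-MRU packet in place. A dummy, uncounted dst pointing at
 * the egress vport's device is attached so the stock IPv4/IPv6
 * fragmentation paths can run, with frag_max_size set to the MRU to bound
 * the fragment size; ovs_vport_output() then transmits each fragment.
 */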
static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
                         __be16 ethertype)
{
        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");
                goto err;
        }

        if (ethertype == htons(ETH_P_IP)) {
                struct dst_entry ovs_dst;
                unsigned long orig_dst;

                prepare_frag(vport, skb);
                dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_dst);
                IPCB(skb)->frag_max_size = mru;

                ip_do_fragment(skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else if (ethertype == htons(ETH_P_IPV6)) {
                const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
                unsigned long orig_dst;
                struct rt6_info ovs_rt;

                if (!v6ops)
                        goto err;

                prepare_frag(vport, skb);
                memset(&ovs_rt, 0, sizeof(ovs_rt));
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IP6CB(skb)->frag_max_size = mru;

                v6ops->fragment(skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else {
                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(ethertype), mru,
                          vport->dev->mtu);
                goto err;
        }

        return;
err:
        kfree_skb(skb);
}

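/* Final transmission of a packet to a vport: packets within the MRU (or
 * with no MRU set) are sent as-is; larger ones are fragmented when the MRU
 * does not exceed the device MTU, and dropped otherwise.
 */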
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
                      struct sw_flow_key *key)
{
        struct vport *vport = ovs_vport_rcu(dp, out_port);

        if (likely(vport)) {
                u16 mru = OVS_CB(skb)->mru;

                if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
                        ovs_vport_send(vport, skb);
                } else if (mru <= vport->dev->mtu) {
                        __be16 ethertype = key->eth.type;

                        if (!is_flow_key_valid(key)) {
                                if (eth_p_mpls(skb->protocol))
                                        ethertype = skb->inner_protocol;
                                else
                                        ethertype = vlan_get_protocol(skb);
                        }

                        ovs_fragment(vport, skb, mru, ethertype);
                } else {
                        kfree_skb(skb);
                }
        } else {
                kfree_skb(skb);
        }
}

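/* Build an OVS_PACKET_CMD_ACTION upcall to userspace, decorating it with
 * any userdata, netlink PID, egress tunnel info or action list requested
 * by the OVS_USERSPACE_ATTR_* attributes.
 */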
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len)
{
        struct ip_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        memset(&upcall, 0, sizeof(upcall));
        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.mru = OVS_CB(skb)->mru;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get egress tunnel info. */
                        struct vport *vport;

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
                        if (vport) {
                                int err;

                                upcall.egress_tun_info = &info;
                                err = ovs_vport_get_egress_tun_info(vport, skb,
                                                                    &upcall);
                                if (err)
                                        upcall.egress_tun_info = NULL;
                        }

                        break;
                }

                case OVS_USERSPACE_ATTR_ACTIONS: {
                        /* Include actions. */
                        upcall.actions = actions;
                        upcall.actions_len = actions_len;
                        break;
                }

                } /* End of switch. */
        }

        return ovs_dp_upcall(dp, skb, key, &upcall);
}

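/* The sample action runs its nested action list with some probability.
 * The common case of a single nested userspace action is executed inline;
 * anything else is cloned and deferred to avoid deep recursion.
 */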
static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr,
                  const struct nlattr *actions, int actions_len)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                u32 probability;

                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        probability = nla_get_u32(a);
                        if (!probability || prandom_u32() > probability)
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        rem = nla_len(acts_list);
        a = nla_data(acts_list);

        /* Actions list is empty, do nothing */
        if (unlikely(!rem))
                return 0;

        /* The only known usage of sample action is having a single user-space
         * action. Treat this usage as a special case.
         * The output_userspace() should clone the skb to be sent to the
         * user space. This skb will be consumed by its caller.
         */
        if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
                   nla_is_last(a, rem)))
                return output_userspace(dp, skb, key, a, actions, actions_len);

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                /* Skip the sample action when out of memory. */
                return 0;

        if (!add_deferred_actions(skb, key, a)) {
                if (net_ratelimit())
                        pr_warn("%s: deferred actions limit reached, dropping sample action\n",
                                ovs_dp_name(dp));

                kfree_skb(skb);
        }
        return 0;
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
{
        struct ovs_action_hash *hash_act = nla_data(attr);
        u32 hash = 0;

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);
        if (!hash)
                hash = 0x1;

        key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
                              struct sw_flow_key *flow_key,
                              const struct nlattr *a)
{
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
                struct ovs_tunnel_info *tun = nla_data(a);

                skb_dst_drop(skb);
                dst_hold((struct dst_entry *)tun->tun_dst);
                skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
                return 0;
        }

        return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
                                     struct sw_flow_key *flow_key,
                                     const struct nlattr *a)
{
        int err = 0;

        switch (nla_type(a)) {
        case OVS_KEY_ATTR_PRIORITY:
                OVS_SET_MASKED(skb->priority, nla_get_u32(a),
                               *get_mask(a, u32 *));
                flow_key->phy.priority = skb->priority;
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
                flow_key->phy.skb_mark = skb->mark;
                break;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                /* Masked data not supported for tunnel. */
                err = -EINVAL;
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, flow_key, nla_data(a),
                                   get_mask(a, struct ovs_key_ethernet *));
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv4 *));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv6 *));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_tcp *));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_udp *));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_sctp *));
                break;

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
                                                                    __be32 *));
                break;

        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
        case OVS_KEY_ATTR_CT_LABELS:
                err = -EINVAL;
                break;
        }

        return err;
}

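/* Queue the packet for another trip through flow lookup under a new
 * recirc_id. The flow key must be valid, since recirculation matches on
 * it; the skb is cloned when the recirc action is not the last one,
 * because the original must survive for the remaining actions.
 */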
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, int rem)
{
        struct deferred_action *da;

        if (!is_flow_key_valid(key)) {
                int err;

                err = ovs_flow_key_update(skb, key);
                if (err)
                        return err;
        }
        BUG_ON(!is_flow_key_valid(key));

        if (!nla_is_last(a, rem)) {
                /* Recirc action is not the last action
                 * of the action list, need to clone the skb.
                 */
                skb = skb_clone(skb, GFP_ATOMIC);

                /* Skip the recirc action when out of memory, but
                 * continue on with the rest of the action list.
                 */
                if (!skb)
                        return 0;
        }

        da = add_deferred_actions(skb, key, NULL);
        if (da) {
                da->pkt_key.recirc_id = nla_get_u32(a);
        } else {
                kfree_skb(skb);

                if (net_ratelimit())
                        pr_warn("%s: deferred action limit reached, drop recirc action\n",
                                ovs_dp_name(dp));
        }

        return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so doing a clone and then
         * freeing the original skbuff is wasteful. So the following code is
         * slightly obscure just to avoid that.
         */
        int prev_port = -1;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (unlikely(prev_port != -1)) {
                        struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

                        if (out_skb)
                                do_output(dp, out_skb, prev_port, key);

                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a, attr, len);
                        break;

                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_MPLS:
                        err = push_mpls(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);
                        break;

                case OVS_ACTION_ATTR_RECIRC:
                        err = execute_recirc(dp, skb, key, a, rem);
                        if (nla_is_last(a, rem)) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
                                 */
                                return err;
                        }
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SET_MASKED:
                case OVS_ACTION_ATTR_SET_TO_MASKED:
                        err = execute_masked_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, key, a, attr, len);
                        break;

                case OVS_ACTION_ATTR_CT:
                        if (!is_flow_key_valid(key)) {
                                err = ovs_flow_key_update(skb, key);
                                if (err)
                                        return err;
                        }

                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
                                             nla_data(a));

                        /* Hide stolen IP fragments from user space. */
                        if (err == -EINPROGRESS)
                                return 0;
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1)
                do_output(dp, skb, prev_port, key);
        else
                consume_skb(skb);

        return 0;
}

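/* Drain the per-CPU FIFO. Entries appended while the loop runs (nested
 * recirculation or sampling) are picked up too, since the tail pointer
 * chases the head; the FIFO is reset afterwards for the next packet.
 */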
static void process_deferred_actions(struct datapath *dp)
{
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))
                return;

        /* Finish executing all deferred actions. */
        do {
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;

                if (actions)
                        do_execute_actions(dp, skb, key, actions,
                                           nla_len(actions));
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet. */
        action_fifo_init(fifo);
}

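/* exec_actions_level counts action-execution nesting on this CPU: only the
 * outermost invocation drains the deferred-action FIFO, which is what
 * bounds recirculation depth instead of unbounded recursion.
 */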
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
{
        int level = this_cpu_read(exec_actions_level);
        int err;

        this_cpu_inc(exec_actions_level);
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        if (!level)
                process_deferred_actions(dp);

        this_cpu_dec(exec_actions_level);
        return err;
}

int action_fifos_init(void)
{
        action_fifos = alloc_percpu(struct action_fifo);
        if (!action_fifos)
                return -ENOMEM;

        return 0;
}

void action_fifos_exit(void)
{
        free_percpu(action_fifos);
}