/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

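/* Deferred execution: recirculation and (cloned) sample actions would
 * otherwise recurse into do_execute_actions() and grow the kernel stack
 * without bound. Instead they are queued on a small per-CPU FIFO and run
 * by process_deferred_actions() once the outermost action list finishes.
 */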
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

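/* Reserve the next slot at the head of the FIFO; returns NULL once the
 * FIFO is full. Since head stops at DEFERRED_ACTION_FIFO_SIZE - 1, at
 * most DEFERRED_ACTION_FIFO_SIZE - 1 entries are ever queued.
 */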
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Returns the queued entry, or NULL if the FIFO is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

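/* Actions that rewrite the packet beyond what the extracted flow key
 * covers clear key->eth.type; a later recirculation then re-extracts the
 * key via ovs_flow_key_update() before using it (see execute_recirc()).
 */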
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

/* 'KEY' must not have any bits set outside of the 'MASK' */
#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
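/* Worked example: with OLD = 0xab, KEY = 0x0f and MASK = 0x0f,
 * MASKED() yields 0x0f | (0xab & 0xf0) == 0xaf; masked bits come
 * from KEY and all other bits are preserved from OLD.
 */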

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

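/* After skb_vlan_pop() a tag may still be present (QinQ); in that case
 * the flow key no longer describes the packet and is invalidated rather
 * than patched up.
 */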
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	SET_MASKED(dst[0], src[0], mask[0]);
	SET_MASKED(dst[1], src[1], mask[1]);
	SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

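/* For UDP, a checksum of zero means "no checksum", so when checksum
 * replacement yields zero it is folded to CSUM_MANGLED_0 below.
 */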
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = MASKED(old[0], addr[0], mask[0]);
	masked[1] = MASKED(old[1], addr[1], mask[1]);
	masked[2] = MASKED(old[2], addr[2], mask[2]);
	masked[3] = MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check whether the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check whether the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

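		/* When a routing extension header is present, the
		 * destination address in the fixed IPv6 header is not the
		 * final destination covered by the L4 checksum, so the
		 * recalculation is skipped in that case below.
		 */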
		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking them. */
	src = MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any pre-existing checksum error through: old_csum ^
	 * old_correct_csum isolates the error term, which is re-applied
	 * on top of the freshly computed checksum.
	 */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct ip_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* The actions list is empty; do nothing. */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of the sample action is having a single
	 * userspace action. Treat this usage as a special case.
	 * output_userspace() should clone the skb to be sent to
	 * userspace; this skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

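/* OVS_HASH_ALG_L4: mix the skb's L4 flow hash with the requested basis
 * and store it in the flow key. A result of zero is reserved to mean
 * "no hash computed", hence the fixup to 0x1.
 */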
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);

		/* FIXME: Remove when all vports have been converted */
		OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;

		return 0;
	}

	return -EINVAL;
}

/* For masked set actions, the netlink attribute carries the key followed
 * by an equally sized mask, so the mask sits at the midpoint of the data.
 */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * in the action list, so we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so cloning and then freeing
	 * the original skbuff would be wasteful. The following code is
	 * slightly convoluted to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}

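/* Drain the per-CPU deferred-action FIFO. Executing an entry may queue
 * further deferred actions (e.g. nested recirculation), so the FIFO is
 * re-checked after every entry rather than drained in a single pass.
 */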
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset the FIFO for the next packet. */
	action_fifo_init(fifo);
}

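/* Only the outermost call on a CPU (exec_actions_level == 0 on entry)
 * drains the deferred-action FIFO; nested calls merely queue onto it.
 */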
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}