/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

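/* Deferred execution state: actions that would otherwise recurse (recirc,
 * sample) are queued per CPU and run after the current action list has
 * finished, which keeps stack usage bounded.
 */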
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Add the action to the deferred fifo; returns NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

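/* Actions that rewrite the packet in ways the flow key does not track
 * (e.g. MPLS and VLAN changes) clear key->eth.type so that the key is
 * re-extracted before it is relied on again, e.g. by execute_recirc().
 */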
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

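/* Adjust the L4 checksum for an IPv4 address rewrite.  Non-first fragments
 * carry no L4 header, so they are left untouched.
 */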
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

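/* Rewrite the UDP ports under the given masks.  The checksum is updated
 * incrementally only when one is actually in use; otherwise the ports are
 * written directly.
 */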
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

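/* Transmit 'skb' on the vport identified by 'out_port', or free it if the
 * port no longer exists.
 */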
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}

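/* Build an OVS_PACKET_CMD_ACTION upcall from the OVS_ACTION_ATTR_USERSPACE
 * attributes and pass the packet up to userspace.
 */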
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct ip_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}

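/* Execute OVS_ACTION_ATTR_SAMPLE: with the configured probability, run the
 * nested action list against the packet.
 */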
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

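/* Execute OVS_ACTION_ATTR_HASH: compute an L4 flow hash and store it in
 * 'key->ovs_flow_hash'.
 */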
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);

		/* FIXME: Remove when all vports have been converted */
		OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;

		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

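/* Execute OVS_ACTION_ATTR_SET_MASKED: the attribute carries the value
 * followed by its mask, and each handler merges the masked value into the
 * packet and the flow key.
 */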
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;
	}

	return err;
}

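/* Execute OVS_ACTION_ATTR_RECIRC: queue the packet for another pass through
 * the flow table with the given recirculation id.
 */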
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
Andy Zhou971427f32014-09-15 19:37:25 -0700840 /* Recirc action is the not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}

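/* Drain this CPU's deferred-action FIFO, executing each queued action list
 * or, for recirculation entries, re-injecting the packet into the datapath.
 */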
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}

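/* Allocate the per-CPU deferred-action FIFOs. */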
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}