Jesse Grossccb13522011-10-25 19:26:31 -07001/*
andy zhou4572ef52017-03-20 16:32:28 -07002 * Copyright (c) 2007-2017 Nicira, Inc.
Jesse Grossccb13522011-10-25 19:26:31 -07003 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21#include <linux/skbuff.h>
22#include <linux/in.h>
23#include <linux/ip.h>
24#include <linux/openvswitch.h>
Joe Stringer7f8a4362015-08-26 11:31:48 -070025#include <linux/netfilter_ipv6.h>
Joe Stringera175a722013-08-22 12:30:48 -070026#include <linux/sctp.h>
Jesse Grossccb13522011-10-25 19:26:31 -070027#include <linux/tcp.h>
28#include <linux/udp.h>
29#include <linux/in6.h>
30#include <linux/if_arp.h>
31#include <linux/if_vlan.h>
Simon Horman25cd9ba2014-10-06 05:05:13 -070032
Joe Stringer7f8a4362015-08-26 11:31:48 -070033#include <net/dst.h>
Jesse Grossccb13522011-10-25 19:26:31 -070034#include <net/ip.h>
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -080035#include <net/ipv6.h>
Joe Stringer7b85b4d2015-08-27 15:25:46 -070036#include <net/ip6_fib.h>
Jesse Grossccb13522011-10-25 19:26:31 -070037#include <net/checksum.h>
38#include <net/dsfield.h>
Simon Horman25cd9ba2014-10-06 05:05:13 -070039#include <net/mpls.h>
Joe Stringera175a722013-08-22 12:30:48 -070040#include <net/sctp/checksum.h>
Jesse Grossccb13522011-10-25 19:26:31 -070041
42#include "datapath.h"
Andy Zhou971427f32014-09-15 19:37:25 -070043#include "flow.h"
Joe Stringer7f8a4362015-08-26 11:31:48 -070044#include "conntrack.h"
Jesse Grossccb13522011-10-25 19:26:31 -070045#include "vport.h"
Yi Yangb2d0f5d2017-11-07 21:07:02 +080046#include "flow_netlink.h"
Jesse Grossccb13522011-10-25 19:26:31 -070047
Andy Zhou971427f32014-09-15 19:37:25 -070048struct deferred_action {
49 struct sk_buff *skb;
50 const struct nlattr *actions;
andy zhou47c697a2017-03-20 16:32:27 -070051 int actions_len;
Andy Zhou971427f32014-09-15 19:37:25 -070052
53 /* Store pkt_key clone when creating deferred action. */
54 struct sw_flow_key pkt_key;
55};
56
Joe Stringer7f8a4362015-08-26 11:31:48 -070057#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
58struct ovs_frag_data {
59 unsigned long dst;
60 struct vport *vport;
61 struct ovs_skb_cb cb;
62 __be16 inner_protocol;
Jiri Bencc66549f2016-10-05 15:01:57 +020063 u16 network_offset; /* valid only for MPLS */
64 u16 vlan_tci;
Joe Stringer7f8a4362015-08-26 11:31:48 -070065 __be16 vlan_proto;
66 unsigned int l2_len;
Jiri Bence2d9d832016-11-10 16:28:19 +010067 u8 mac_proto;
Joe Stringer7f8a4362015-08-26 11:31:48 -070068 u8 l2_data[MAX_L2_LEN];
69};
70
71static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
72
Andy Zhou971427f32014-09-15 19:37:25 -070073#define DEFERRED_ACTION_FIFO_SIZE 10
Lance Richardson2679d042016-09-13 10:08:54 -040074#define OVS_RECURSION_LIMIT 5
75#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
Andy Zhou971427f32014-09-15 19:37:25 -070076struct action_fifo {
77 int head;
78 int tail;
79 /* Deferred action fifo queue storage. */
80 struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
81};
82
andy zhou4572ef52017-03-20 16:32:28 -070083struct action_flow_keys {
Lance Richardson2679d042016-09-13 10:08:54 -040084 struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
85};
86
Andy Zhou971427f32014-09-15 19:37:25 -070087static struct action_fifo __percpu *action_fifos;
andy zhou4572ef52017-03-20 16:32:28 -070088static struct action_flow_keys __percpu *flow_keys;
Andy Zhou971427f32014-09-15 19:37:25 -070089static DEFINE_PER_CPU(int, exec_actions_level);
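
/* A brief sketch of the recursion scheme (derived from the code below):
 * sample() and recirc() may re-enter action execution via clone_execute().
 * 'exec_actions_level' tracks the per-CPU recursion depth; the first
 * OVS_DEFERRED_ACTION_THRESHOLD levels take a pre-allocated key from
 * 'flow_keys' and run immediately, while deeper levels are queued in
 * 'action_fifos' and drained once the outermost invocation unwinds (the
 * drain loop lives later in this file).  OVS_RECURSION_LIMIT bounds the
 * overall depth.
 */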
90
/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space.  Return NULL if no key space is left.
 */
94static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
95{
96 struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
97 int level = this_cpu_read(exec_actions_level);
98 struct sw_flow_key *key = NULL;
99
100 if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
101 key = &keys->key[level - 1];
102 *key = *key_;
103 }
104
105 return key;
106}
107
Andy Zhou971427f32014-09-15 19:37:25 -0700108static void action_fifo_init(struct action_fifo *fifo)
109{
110 fifo->head = 0;
111 fifo->tail = 0;
112}
113
Thomas Graf12eb18f2014-11-06 06:58:52 -0800114static bool action_fifo_is_empty(const struct action_fifo *fifo)
Andy Zhou971427f32014-09-15 19:37:25 -0700115{
116 return (fifo->head == fifo->tail);
117}
118
119static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
120{
121 if (action_fifo_is_empty(fifo))
122 return NULL;
123
124 return &fifo->fifo[fifo->tail++];
125}
126
127static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
128{
129 if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
130 return NULL;
131
132 return &fifo->fifo[fifo->head++];
133}
134
/* Add a deferred action to the per-CPU fifo.  Return a pointer to the
 * queued entry, or NULL if the fifo is full.
 */
136static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
andy zhou47c697a2017-03-20 16:32:27 -0700137 const struct sw_flow_key *key,
138 const struct nlattr *actions,
139 const int actions_len)
Andy Zhou971427f32014-09-15 19:37:25 -0700140{
141 struct action_fifo *fifo;
142 struct deferred_action *da;
143
144 fifo = this_cpu_ptr(action_fifos);
145 da = action_fifo_put(fifo);
146 if (da) {
147 da->skb = skb;
andy zhou47c697a2017-03-20 16:32:27 -0700148 da->actions = actions;
149 da->actions_len = actions_len;
Andy Zhou971427f32014-09-15 19:37:25 -0700150 da->pkt_key = *key;
151 }
152
153 return da;
154}
155
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800156static void invalidate_flow_key(struct sw_flow_key *key)
157{
Jiri Benc329f45b2016-11-10 16:28:18 +0100158 key->mac_proto |= SW_FLOW_KEY_INVALID;
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800159}
160
161static bool is_flow_key_valid(const struct sw_flow_key *key)
162{
Jiri Benc329f45b2016-11-10 16:28:18 +0100163 return !(key->mac_proto & SW_FLOW_KEY_INVALID);
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800164}
165
andy zhoubef7f752017-03-20 16:32:30 -0700166static int clone_execute(struct datapath *dp, struct sk_buff *skb,
167 struct sw_flow_key *key,
168 u32 recirc_id,
169 const struct nlattr *actions, int len,
170 bool last, bool clone_flow_key);
171
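/* Rewrite the Ethernet type field.  When skb->ip_summed is
 * CHECKSUM_COMPLETE, fold the difference between the old and new h_proto
 * into skb->csum so the stored checksum stays consistent with the frame.
 */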
Simon Hormanbc7cc592016-05-30 14:04:25 +0900172static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
173 __be16 ethertype)
174{
175 if (skb->ip_summed == CHECKSUM_COMPLETE) {
176 __be16 diff[] = { ~(hdr->h_proto), ethertype };
177
178 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
179 ~skb->csum);
180 }
181
182 hdr->h_proto = ethertype;
183}
184
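/* push_mpls() makes room for one label stack entry by moving the L2 header
 * MPLS_HLEN bytes towards the front of the buffer, writes the new LSE just
 * behind it and, for Ethernet frames, rewrites the ethertype to the value
 * carried in the action.  Tunnel-encapsulated skbs are rejected because the
 * stack cannot handle tunnel and MPLS GSO at the same time.
 */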
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800185static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
Simon Horman25cd9ba2014-10-06 05:05:13 -0700186 const struct ovs_action_push_mpls *mpls)
187{
Jiri Benc85de4a22016-09-30 19:08:07 +0200188 struct mpls_shim_hdr *new_mpls_lse;
Simon Horman25cd9ba2014-10-06 05:05:13 -0700189
	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
191 if (skb->encapsulation)
192 return -ENOTSUPP;
193
194 if (skb_cow_head(skb, MPLS_HLEN) < 0)
195 return -ENOMEM;
196
David Ahern48d2ab62016-08-24 20:10:44 -0700197 if (!skb->inner_protocol) {
198 skb_set_inner_network_header(skb, skb->mac_len);
199 skb_set_inner_protocol(skb, skb->protocol);
200 }
201
Simon Horman25cd9ba2014-10-06 05:05:13 -0700202 skb_push(skb, MPLS_HLEN);
203 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
204 skb->mac_len);
205 skb_reset_mac_header(skb);
David Ahern48d2ab62016-08-24 20:10:44 -0700206 skb_set_network_header(skb, skb->mac_len);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700207
Jiri Benc85de4a22016-09-30 19:08:07 +0200208 new_mpls_lse = mpls_hdr(skb);
209 new_mpls_lse->label_stack_entry = mpls->mpls_lse;
Simon Horman25cd9ba2014-10-06 05:05:13 -0700210
Daniel Borkmann6b83d282016-02-20 00:29:30 +0100211 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700212
Jiri Benc1560a072016-11-10 16:28:20 +0100213 if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
214 update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700215 skb->protocol = mpls->mpls_ethertype;
216
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800217 invalidate_flow_key(key);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700218 return 0;
219}
220
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800221static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
222 const __be16 ethertype)
Simon Horman25cd9ba2014-10-06 05:05:13 -0700223{
Simon Horman25cd9ba2014-10-06 05:05:13 -0700224 int err;
225
Jiri Pirkoe2195122014-11-19 14:05:01 +0100226 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700227 if (unlikely(err))
228 return err;
229
Jiri Benc85de4a22016-09-30 19:08:07 +0200230 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700231
232 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
233 skb->mac_len);
234
235 __skb_pull(skb, MPLS_HLEN);
236 skb_reset_mac_header(skb);
David Ahern48d2ab62016-08-24 20:10:44 -0700237 skb_set_network_header(skb, skb->mac_len);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700238
Jiri Benc1560a072016-11-10 16:28:20 +0100239 if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
240 struct ethhdr *hdr;
241
242 /* mpls_hdr() is used to locate the ethertype field correctly in the
243 * presence of VLAN tags.
244 */
245 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
246 update_ethertype(skb, hdr, ethertype);
247 }
Simon Horman25cd9ba2014-10-06 05:05:13 -0700248 if (eth_p_mpls(skb->protocol))
249 skb->protocol = ethertype;
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800250
251 invalidate_flow_key(key);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700252 return 0;
253}
254
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800255static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
256 const __be32 *mpls_lse, const __be32 *mask)
Simon Horman25cd9ba2014-10-06 05:05:13 -0700257{
Jiri Benc85de4a22016-09-30 19:08:07 +0200258 struct mpls_shim_hdr *stack;
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800259 __be32 lse;
Simon Horman25cd9ba2014-10-06 05:05:13 -0700260 int err;
261
Jiri Pirkoe2195122014-11-19 14:05:01 +0100262 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700263 if (unlikely(err))
264 return err;
265
Jiri Benc85de4a22016-09-30 19:08:07 +0200266 stack = mpls_hdr(skb);
267 lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
Simon Horman25cd9ba2014-10-06 05:05:13 -0700268 if (skb->ip_summed == CHECKSUM_COMPLETE) {
Jiri Benc85de4a22016-09-30 19:08:07 +0200269 __be32 diff[] = { ~(stack->label_stack_entry), lse };
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800270
Simon Horman25cd9ba2014-10-06 05:05:13 -0700271 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
272 ~skb->csum);
273 }
274
Jiri Benc85de4a22016-09-30 19:08:07 +0200275 stack->label_stack_entry = lse;
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800276 flow_key->mpls.top_lse = lse;
Simon Horman25cd9ba2014-10-06 05:05:13 -0700277 return 0;
278}
279
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800280static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
Jesse Grossccb13522011-10-25 19:26:31 -0700281{
Jesse Grossccb13522011-10-25 19:26:31 -0700282 int err;
283
Jiri Pirko93515d52014-11-19 14:05:02 +0100284 err = skb_vlan_pop(skb);
Eric Garver018c1dd2016-09-07 12:56:59 -0400285 if (skb_vlan_tag_present(skb)) {
Jiri Pirko93515d52014-11-19 14:05:02 +0100286 invalidate_flow_key(key);
Eric Garver018c1dd2016-09-07 12:56:59 -0400287 } else {
288 key->eth.vlan.tci = 0;
289 key->eth.vlan.tpid = 0;
290 }
Jiri Pirko93515d52014-11-19 14:05:02 +0100291 return err;
Jesse Grossccb13522011-10-25 19:26:31 -0700292}
293
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800294static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
295 const struct ovs_action_push_vlan *vlan)
Jesse Grossccb13522011-10-25 19:26:31 -0700296{
Eric Garver018c1dd2016-09-07 12:56:59 -0400297 if (skb_vlan_tag_present(skb)) {
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800298 invalidate_flow_key(key);
Eric Garver018c1dd2016-09-07 12:56:59 -0400299 } else {
300 key->eth.vlan.tci = vlan->vlan_tci;
301 key->eth.vlan.tpid = vlan->vlan_tpid;
302 }
Jiri Pirko93515d52014-11-19 14:05:02 +0100303 return skb_vlan_push(skb, vlan->vlan_tpid,
Michał Mirosław9df46ae2018-11-08 18:44:50 +0100304 ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
Jesse Grossccb13522011-10-25 19:26:31 -0700305}
306
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800307/* 'src' is already properly masked. */
308static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
309{
310 u16 *dst = (u16 *)dst_;
311 const u16 *src = (const u16 *)src_;
312 const u16 *mask = (const u16 *)mask_;
313
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700314 OVS_SET_MASKED(dst[0], src[0], mask[0]);
315 OVS_SET_MASKED(dst[1], src[1], mask[1]);
316 OVS_SET_MASKED(dst[2], src[2], mask[2]);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800317}
318
319static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
320 const struct ovs_key_ethernet *key,
321 const struct ovs_key_ethernet *mask)
Jesse Grossccb13522011-10-25 19:26:31 -0700322{
323 int err;
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800324
Jiri Pirkoe2195122014-11-19 14:05:01 +0100325 err = skb_ensure_writable(skb, ETH_HLEN);
Jesse Grossccb13522011-10-25 19:26:31 -0700326 if (unlikely(err))
327 return err;
328
Pravin B Shelarb34df5e2013-06-13 11:11:44 -0700329 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
330
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800331 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
332 mask->eth_src);
333 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
334 mask->eth_dst);
Jesse Grossccb13522011-10-25 19:26:31 -0700335
Daniel Borkmann6b83d282016-02-20 00:29:30 +0100336 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
Pravin B Shelarb34df5e2013-06-13 11:11:44 -0700337
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800338 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
339 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
Jesse Grossccb13522011-10-25 19:26:31 -0700340 return 0;
341}
342
Jiri Benc91820da2016-11-10 16:28:23 +0100343/* pop_eth does not support VLAN packets as this action is never called
344 * for them.
345 */
346static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
347{
348 skb_pull_rcsum(skb, ETH_HLEN);
349 skb_reset_mac_header(skb);
350 skb_reset_mac_len(skb);
351
352 /* safe right before invalidate_flow_key */
353 key->mac_proto = MAC_PROTO_NONE;
354 invalidate_flow_key(key);
355 return 0;
356}
357
358static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
359 const struct ovs_action_push_eth *ethh)
360{
361 struct ethhdr *hdr;
362
363 /* Add the new Ethernet header */
364 if (skb_cow_head(skb, ETH_HLEN) < 0)
365 return -ENOMEM;
366
367 skb_push(skb, ETH_HLEN);
368 skb_reset_mac_header(skb);
369 skb_reset_mac_len(skb);
370
371 hdr = eth_hdr(skb);
372 ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
373 ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
374 hdr->h_proto = skb->protocol;
375
376 skb_postpush_rcsum(skb, hdr, ETH_HLEN);
377
378 /* safe right before invalidate_flow_key */
379 key->mac_proto = MAC_PROTO_ETHERNET;
380 invalidate_flow_key(key);
381 return 0;
382}
383
Yi Yangb2d0f5d2017-11-07 21:07:02 +0800384static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
385 const struct nshhdr *nh)
386{
387 int err;
388
389 err = nsh_push(skb, nh);
390 if (err)
391 return err;
392
393 /* safe right before invalidate_flow_key */
394 key->mac_proto = MAC_PROTO_NONE;
395 invalidate_flow_key(key);
396 return 0;
397}
398
399static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
400{
401 int err;
402
403 err = nsh_pop(skb);
404 if (err)
405 return err;
406
407 /* safe right before invalidate_flow_key */
408 if (skb->protocol == htons(ETH_P_TEB))
409 key->mac_proto = MAC_PROTO_ETHERNET;
410 else
411 key->mac_proto = MAC_PROTO_NONE;
412 invalidate_flow_key(key);
413 return 0;
414}
415
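/* Update the TCP/UDP checksum for an IPv4 address rewrite.  Fragments after
 * the first carry no L4 header and are skipped; a zero UDP checksum means
 * "no checksum" and is left alone unless the checksum still has to be
 * completed in hardware (CHECKSUM_PARTIAL).
 */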
Glenn Griffin3576fd72015-08-03 09:56:54 -0700416static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
417 __be32 addr, __be32 new_addr)
Jesse Grossccb13522011-10-25 19:26:31 -0700418{
419 int transport_len = skb->len - skb_transport_offset(skb);
420
Glenn Griffin3576fd72015-08-03 09:56:54 -0700421 if (nh->frag_off & htons(IP_OFFSET))
422 return;
423
Jesse Grossccb13522011-10-25 19:26:31 -0700424 if (nh->protocol == IPPROTO_TCP) {
425 if (likely(transport_len >= sizeof(struct tcphdr)))
426 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
Tom Herbert4b048d62015-08-17 13:42:25 -0700427 addr, new_addr, true);
Jesse Grossccb13522011-10-25 19:26:31 -0700428 } else if (nh->protocol == IPPROTO_UDP) {
Jesse Gross81e5d412012-03-06 15:05:46 -0800429 if (likely(transport_len >= sizeof(struct udphdr))) {
430 struct udphdr *uh = udp_hdr(skb);
431
432 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
433 inet_proto_csum_replace4(&uh->check, skb,
Tom Herbert4b048d62015-08-17 13:42:25 -0700434 addr, new_addr, true);
Jesse Gross81e5d412012-03-06 15:05:46 -0800435 if (!uh->check)
436 uh->check = CSUM_MANGLED_0;
437 }
438 }
Jesse Grossccb13522011-10-25 19:26:31 -0700439 }
Glenn Griffin3576fd72015-08-03 09:56:54 -0700440}
Jesse Grossccb13522011-10-25 19:26:31 -0700441
Glenn Griffin3576fd72015-08-03 09:56:54 -0700442static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
443 __be32 *addr, __be32 new_addr)
444{
445 update_ip_l4_checksum(skb, nh, *addr, new_addr);
Jesse Grossccb13522011-10-25 19:26:31 -0700446 csum_replace4(&nh->check, *addr, new_addr);
Tom Herbert7539fad2013-12-15 22:12:18 -0800447 skb_clear_hash(skb);
Jesse Grossccb13522011-10-25 19:26:31 -0700448 *addr = new_addr;
449}
450
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800451static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
452 __be32 addr[4], const __be32 new_addr[4])
453{
454 int transport_len = skb->len - skb_transport_offset(skb);
455
Jesse Gross856447d2014-11-11 14:32:20 -0800456 if (l4_proto == NEXTHDR_TCP) {
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800457 if (likely(transport_len >= sizeof(struct tcphdr)))
458 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
Tom Herbert4b048d62015-08-17 13:42:25 -0700459 addr, new_addr, true);
Jesse Gross856447d2014-11-11 14:32:20 -0800460 } else if (l4_proto == NEXTHDR_UDP) {
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800461 if (likely(transport_len >= sizeof(struct udphdr))) {
462 struct udphdr *uh = udp_hdr(skb);
463
464 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
465 inet_proto_csum_replace16(&uh->check, skb,
Tom Herbert4b048d62015-08-17 13:42:25 -0700466 addr, new_addr, true);
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800467 if (!uh->check)
468 uh->check = CSUM_MANGLED_0;
469 }
470 }
Jesse Gross856447d2014-11-11 14:32:20 -0800471 } else if (l4_proto == NEXTHDR_ICMP) {
472 if (likely(transport_len >= sizeof(struct icmp6hdr)))
473 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
Tom Herbert4b048d62015-08-17 13:42:25 -0700474 skb, addr, new_addr, true);
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800475 }
476}
477
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800478static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
479 const __be32 mask[4], __be32 masked[4])
480{
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700481 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
482 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
483 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
484 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800485}
486
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800487static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
488 __be32 addr[4], const __be32 new_addr[4],
489 bool recalculate_csum)
490{
491 if (recalculate_csum)
492 update_ipv6_checksum(skb, l4_proto, addr, new_addr);
493
Tom Herbert7539fad2013-12-15 22:12:18 -0800494 skb_clear_hash(skb);
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800495 memcpy(addr, new_addr, sizeof(__be32[4]));
496}
497
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800498static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800499{
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800500 /* Bits 21-24 are always unmasked, so this retains their values. */
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700501 OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
502 OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
503 OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800504}
505
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800506static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
507 u8 mask)
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800508{
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700509 new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800510
Jesse Grossccb13522011-10-25 19:26:31 -0700511 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
512 nh->ttl = new_ttl;
513}
514
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800515static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
516 const struct ovs_key_ipv4 *key,
517 const struct ovs_key_ipv4 *mask)
Jesse Grossccb13522011-10-25 19:26:31 -0700518{
519 struct iphdr *nh;
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800520 __be32 new_addr;
Jesse Grossccb13522011-10-25 19:26:31 -0700521 int err;
522
Jiri Pirkoe2195122014-11-19 14:05:01 +0100523 err = skb_ensure_writable(skb, skb_network_offset(skb) +
524 sizeof(struct iphdr));
Jesse Grossccb13522011-10-25 19:26:31 -0700525 if (unlikely(err))
526 return err;
527
528 nh = ip_hdr(skb);
529
	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
534 if (mask->ipv4_src) {
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700535 new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
Jesse Grossccb13522011-10-25 19:26:31 -0700536
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800537 if (unlikely(new_addr != nh->saddr)) {
538 set_ip_addr(skb, nh, &nh->saddr, new_addr);
539 flow_key->ipv4.addr.src = new_addr;
540 }
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800541 }
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800542 if (mask->ipv4_dst) {
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700543 new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
Jesse Grossccb13522011-10-25 19:26:31 -0700544
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800545 if (unlikely(new_addr != nh->daddr)) {
546 set_ip_addr(skb, nh, &nh->daddr, new_addr);
547 flow_key->ipv4.addr.dst = new_addr;
548 }
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800549 }
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800550 if (mask->ipv4_tos) {
551 ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
552 flow_key->ip.tos = nh->tos;
553 }
554 if (mask->ipv4_ttl) {
555 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
556 flow_key->ip.ttl = nh->ttl;
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800557 }
Jesse Grossccb13522011-10-25 19:26:31 -0700558
559 return 0;
560}
561
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800562static bool is_ipv6_mask_nonzero(const __be32 addr[4])
563{
564 return !!(addr[0] | addr[1] | addr[2] | addr[3]);
565}
566
567static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
568 const struct ovs_key_ipv6 *key,
569 const struct ovs_key_ipv6 *mask)
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800570{
571 struct ipv6hdr *nh;
572 int err;
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800573
Jiri Pirkoe2195122014-11-19 14:05:01 +0100574 err = skb_ensure_writable(skb, skb_network_offset(skb) +
575 sizeof(struct ipv6hdr));
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800576 if (unlikely(err))
577 return err;
578
579 nh = ipv6_hdr(skb);
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800580
	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
585 if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
586 __be32 *saddr = (__be32 *)&nh->saddr;
587 __be32 masked[4];
588
589 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
590
591 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
Simon Hormanb4f70522016-04-21 11:49:15 +1000592 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800593 true);
594 memcpy(&flow_key->ipv6.addr.src, masked,
595 sizeof(flow_key->ipv6.addr.src));
596 }
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800597 }
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800598 if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800599 unsigned int offset = 0;
600 int flags = IP6_FH_F_SKIP_RH;
601 bool recalc_csum = true;
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800602 __be32 *daddr = (__be32 *)&nh->daddr;
603 __be32 masked[4];
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800604
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800605 mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800606
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800607 if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
608 if (ipv6_ext_hdr(nh->nexthdr))
609 recalc_csum = (ipv6_find_hdr(skb, &offset,
610 NEXTHDR_ROUTING,
611 NULL, &flags)
612 != NEXTHDR_ROUTING);
613
Simon Hormanb4f70522016-04-21 11:49:15 +1000614 set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800615 recalc_csum);
616 memcpy(&flow_key->ipv6.addr.dst, masked,
617 sizeof(flow_key->ipv6.addr.dst));
618 }
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800619 }
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800620 if (mask->ipv6_tclass) {
621 ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
622 flow_key->ip.tos = ipv6_get_dsfield(nh);
623 }
624 if (mask->ipv6_label) {
625 set_ipv6_fl(nh, ntohl(key->ipv6_label),
626 ntohl(mask->ipv6_label));
627 flow_key->ipv6.label =
628 *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
629 }
630 if (mask->ipv6_hlimit) {
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700631 OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
632 mask->ipv6_hlimit);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800633 flow_key->ip.ttl = nh->hop_limit;
634 }
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -0800635 return 0;
636}
637
Yi Yangb2d0f5d2017-11-07 21:07:02 +0800638static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
639 const struct nlattr *a)
640{
641 struct nshhdr *nh;
642 size_t length;
643 int err;
644 u8 flags;
645 u8 ttl;
646 int i;
647
648 struct ovs_key_nsh key;
649 struct ovs_key_nsh mask;
650
651 err = nsh_key_from_nlattr(a, &key, &mask);
652 if (err)
653 return err;
654
655 /* Make sure the NSH base header is there */
656 if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
657 return -ENOMEM;
658
659 nh = nsh_hdr(skb);
660 length = nsh_hdr_len(nh);
661
662 /* Make sure the whole NSH header is there */
663 err = skb_ensure_writable(skb, skb_network_offset(skb) +
664 length);
665 if (unlikely(err))
666 return err;
667
668 nh = nsh_hdr(skb);
669 skb_postpull_rcsum(skb, nh, length);
670 flags = nsh_get_flags(nh);
671 flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
672 flow_key->nsh.base.flags = flags;
673 ttl = nsh_get_ttl(nh);
674 ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
675 flow_key->nsh.base.ttl = ttl;
676 nsh_set_flags_and_ttl(nh, flags, ttl);
677 nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
678 mask.base.path_hdr);
679 flow_key->nsh.base.path_hdr = nh->path_hdr;
680 switch (nh->mdtype) {
681 case NSH_M_TYPE1:
682 for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
683 nh->md1.context[i] =
684 OVS_MASKED(nh->md1.context[i], key.context[i],
685 mask.context[i]);
686 }
687 memcpy(flow_key->nsh.context, nh->md1.context,
688 sizeof(nh->md1.context));
689 break;
690 case NSH_M_TYPE2:
691 memset(flow_key->nsh.context, 0,
692 sizeof(flow_key->nsh.context));
693 break;
694 default:
695 return -EINVAL;
696 }
697 skb_postpush_rcsum(skb, nh, length);
698 return 0;
699}
700
Jiri Pirkoe2195122014-11-19 14:05:01 +0100701/* Must follow skb_ensure_writable() since that can move the skb data. */
Jesse Grossccb13522011-10-25 19:26:31 -0700702static void set_tp_port(struct sk_buff *skb, __be16 *port,
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800703 __be16 new_port, __sum16 *check)
Jesse Grossccb13522011-10-25 19:26:31 -0700704{
Tom Herbert4b048d62015-08-17 13:42:25 -0700705 inet_proto_csum_replace2(check, skb, *port, new_port, false);
Jesse Grossccb13522011-10-25 19:26:31 -0700706 *port = new_port;
Jesse Grossccb13522011-10-25 19:26:31 -0700707}
708
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800709static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
710 const struct ovs_key_udp *key,
711 const struct ovs_key_udp *mask)
Jesse Grossccb13522011-10-25 19:26:31 -0700712{
713 struct udphdr *uh;
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800714 __be16 src, dst;
Jesse Grossccb13522011-10-25 19:26:31 -0700715 int err;
716
Jiri Pirkoe2195122014-11-19 14:05:01 +0100717 err = skb_ensure_writable(skb, skb_transport_offset(skb) +
718 sizeof(struct udphdr));
Jesse Grossccb13522011-10-25 19:26:31 -0700719 if (unlikely(err))
720 return err;
721
722 uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking
	 * them for zero here.
	 */
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700724 src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
725 dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800726
727 if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
728 if (likely(src != uh->source)) {
729 set_tp_port(skb, &uh->source, src, &uh->check);
730 flow_key->tp.src = src;
731 }
732 if (likely(dst != uh->dest)) {
733 set_tp_port(skb, &uh->dest, dst, &uh->check);
734 flow_key->tp.dst = dst;
735 }
736
737 if (unlikely(!uh->check))
738 uh->check = CSUM_MANGLED_0;
739 } else {
740 uh->source = src;
741 uh->dest = dst;
742 flow_key->tp.src = src;
743 flow_key->tp.dst = dst;
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800744 }
Jesse Grossccb13522011-10-25 19:26:31 -0700745
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800746 skb_clear_hash(skb);
Jesse Grossccb13522011-10-25 19:26:31 -0700747
748 return 0;
749}
750
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800751static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
752 const struct ovs_key_tcp *key,
753 const struct ovs_key_tcp *mask)
Jesse Grossccb13522011-10-25 19:26:31 -0700754{
755 struct tcphdr *th;
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800756 __be16 src, dst;
Jesse Grossccb13522011-10-25 19:26:31 -0700757 int err;
758
Jiri Pirkoe2195122014-11-19 14:05:01 +0100759 err = skb_ensure_writable(skb, skb_transport_offset(skb) +
760 sizeof(struct tcphdr));
Jesse Grossccb13522011-10-25 19:26:31 -0700761 if (unlikely(err))
762 return err;
763
764 th = tcp_hdr(skb);
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700765 src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800766 if (likely(src != th->source)) {
767 set_tp_port(skb, &th->source, src, &th->check);
768 flow_key->tp.src = src;
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800769 }
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700770 dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800771 if (likely(dst != th->dest)) {
772 set_tp_port(skb, &th->dest, dst, &th->check);
773 flow_key->tp.dst = dst;
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800774 }
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800775 skb_clear_hash(skb);
Jesse Grossccb13522011-10-25 19:26:31 -0700776
777 return 0;
778}
779
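/* SCTP uses a CRC32c checksum that cannot be updated incrementally from the
 * old and new ports alone, so it is recomputed over the packet before and
 * after the rewrite.  The XOR of old_csum, old_correct_csum and new_csum
 * deliberately carries any pre-existing checksum error through to the
 * rewritten packet.
 */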
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800780static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
781 const struct ovs_key_sctp *key,
782 const struct ovs_key_sctp *mask)
Joe Stringera175a722013-08-22 12:30:48 -0700783{
Joe Stringera175a722013-08-22 12:30:48 -0700784 unsigned int sctphoff = skb_transport_offset(skb);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800785 struct sctphdr *sh;
786 __le32 old_correct_csum, new_csum, old_csum;
787 int err;
Joe Stringera175a722013-08-22 12:30:48 -0700788
Jiri Pirkoe2195122014-11-19 14:05:01 +0100789 err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
Joe Stringera175a722013-08-22 12:30:48 -0700790 if (unlikely(err))
791 return err;
792
793 sh = sctp_hdr(skb);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800794 old_csum = sh->checksum;
795 old_correct_csum = sctp_compute_cksum(skb, sctphoff);
Joe Stringera175a722013-08-22 12:30:48 -0700796
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700797 sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
798 sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
Joe Stringera175a722013-08-22 12:30:48 -0700799
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800800 new_csum = sctp_compute_cksum(skb, sctphoff);
Joe Stringera175a722013-08-22 12:30:48 -0700801
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800802 /* Carry any checksum errors through. */
803 sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
Joe Stringera175a722013-08-22 12:30:48 -0700804
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800805 skb_clear_hash(skb);
806 flow_key->tp.src = sh->source;
807 flow_key->tp.dst = sh->dest;
Joe Stringera175a722013-08-22 12:30:48 -0700808
809 return 0;
810}
811
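/* Per-fragment output callback used while fragmenting: restore the dst, OVS
 * control block, inner protocol, VLAN tag and saved L2 header stashed by
 * prepare_frag() in 'ovs_frag_data_storage', then hand the fragment to
 * ovs_vport_send().
 */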
Eric W. Biederman188515f2015-09-14 20:08:51 -0500812static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
Joe Stringer7f8a4362015-08-26 11:31:48 -0700813{
814 struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
815 struct vport *vport = data->vport;
816
817 if (skb_cow_head(skb, data->l2_len) < 0) {
818 kfree_skb(skb);
819 return -ENOMEM;
820 }
821
822 __skb_dst_copy(skb, data->dst);
823 *OVS_CB(skb) = data->cb;
824 skb->inner_protocol = data->inner_protocol;
Michał Mirosław9df46ae2018-11-08 18:44:50 +0100825 if (data->vlan_tci & VLAN_CFI_MASK)
826 __vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
827 else
828 __vlan_hwaccel_clear_tag(skb);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700829
830 /* Reconstruct the MAC header. */
831 skb_push(skb, data->l2_len);
832 memcpy(skb->data, &data->l2_data, data->l2_len);
Daniel Borkmann6b83d282016-02-20 00:29:30 +0100833 skb_postpush_rcsum(skb, skb->data, data->l2_len);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700834 skb_reset_mac_header(skb);
835
Jiri Bencc66549f2016-10-05 15:01:57 +0200836 if (eth_p_mpls(skb->protocol)) {
837 skb->inner_network_header = skb->network_header;
838 skb_set_network_header(skb, data->network_offset);
839 skb_reset_mac_len(skb);
840 }
841
Jiri Bence2d9d832016-11-10 16:28:19 +0100842 ovs_vport_send(vport, skb, data->mac_proto);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700843 return 0;
844}
845
846static unsigned int
847ovs_dst_get_mtu(const struct dst_entry *dst)
848{
849 return dst->dev->mtu;
850}
851
852static struct dst_ops ovs_dst_ops = {
853 .family = AF_UNSPEC,
854 .mtu = ovs_dst_get_mtu,
855};
856
/* prepare_frag() stashes the original L2 header and per-packet metadata
 * once per (larger-than-MTU) frame; ovs_vport_output() restores them for
 * each fragment that is produced.
 */
Jiri Bencc66549f2016-10-05 15:01:57 +0200860static void prepare_frag(struct vport *vport, struct sk_buff *skb,
Jiri Bence2d9d832016-11-10 16:28:19 +0100861 u16 orig_network_offset, u8 mac_proto)
Joe Stringer7f8a4362015-08-26 11:31:48 -0700862{
863 unsigned int hlen = skb_network_offset(skb);
864 struct ovs_frag_data *data;
865
866 data = this_cpu_ptr(&ovs_frag_data_storage);
867 data->dst = skb->_skb_refdst;
868 data->vport = vport;
869 data->cb = *OVS_CB(skb);
870 data->inner_protocol = skb->inner_protocol;
Jiri Bencc66549f2016-10-05 15:01:57 +0200871 data->network_offset = orig_network_offset;
Michał Mirosław9df46ae2018-11-08 18:44:50 +0100872 if (skb_vlan_tag_present(skb))
873 data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
874 else
875 data->vlan_tci = 0;
Joe Stringer7f8a4362015-08-26 11:31:48 -0700876 data->vlan_proto = skb->vlan_proto;
Jiri Bence2d9d832016-11-10 16:28:19 +0100877 data->mac_proto = mac_proto;
Joe Stringer7f8a4362015-08-26 11:31:48 -0700878 data->l2_len = hlen;
879 memcpy(&data->l2_data, skb->data, hlen);
880
881 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
882 skb_pull(skb, hlen);
883}
884
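/* Fragment a packet that exceeds the MRU.  The IPv4/IPv6 fragmentation
 * paths dereference skb_dst(), so a throw-away dst (an rt6_info for IPv6)
 * backed by 'ovs_dst_ops' and pointing at the egress vport's device is
 * attached with skb_dst_set_noref(); frag_max_size is set to the MRU, each
 * fragment is emitted through ovs_vport_output(), and the original dst
 * reference is released with refdst_drop() afterwards.
 */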
Eric W. Biedermanc559cd32015-09-14 20:10:28 -0500885static void ovs_fragment(struct net *net, struct vport *vport,
Jiri Bence2d9d832016-11-10 16:28:19 +0100886 struct sk_buff *skb, u16 mru,
887 struct sw_flow_key *key)
Joe Stringer7f8a4362015-08-26 11:31:48 -0700888{
Jiri Bencc66549f2016-10-05 15:01:57 +0200889 u16 orig_network_offset = 0;
890
891 if (eth_p_mpls(skb->protocol)) {
892 orig_network_offset = skb_network_offset(skb);
893 skb->network_header = skb->inner_network_header;
894 }
895
Joe Stringer7f8a4362015-08-26 11:31:48 -0700896 if (skb_network_offset(skb) > MAX_L2_LEN) {
897 OVS_NLERR(1, "L2 header too long to fragment");
Joe Stringerb8f22572015-10-06 10:59:57 -0700898 goto err;
Joe Stringer7f8a4362015-08-26 11:31:48 -0700899 }
900
Jiri Bence2d9d832016-11-10 16:28:19 +0100901 if (key->eth.type == htons(ETH_P_IP)) {
Joe Stringer7f8a4362015-08-26 11:31:48 -0700902 struct dst_entry ovs_dst;
903 unsigned long orig_dst;
904
Jiri Bence2d9d832016-11-10 16:28:19 +0100905 prepare_frag(vport, skb, orig_network_offset,
906 ovs_key_mac_proto(key));
Joe Stringer7f8a4362015-08-26 11:31:48 -0700907 dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
908 DST_OBSOLETE_NONE, DST_NOCOUNT);
909 ovs_dst.dev = vport->dev;
910
911 orig_dst = skb->_skb_refdst;
912 skb_dst_set_noref(skb, &ovs_dst);
913 IPCB(skb)->frag_max_size = mru;
914
Eric W. Biederman694869b2015-06-12 21:55:31 -0500915 ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700916 refdst_drop(orig_dst);
Jiri Bence2d9d832016-11-10 16:28:19 +0100917 } else if (key->eth.type == htons(ETH_P_IPV6)) {
Joe Stringer7f8a4362015-08-26 11:31:48 -0700918 const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
919 unsigned long orig_dst;
920 struct rt6_info ovs_rt;
921
Peter Downsf1304f72017-03-01 01:01:17 -0800922 if (!v6ops)
Joe Stringerb8f22572015-10-06 10:59:57 -0700923 goto err;
Joe Stringer7f8a4362015-08-26 11:31:48 -0700924
Jiri Bence2d9d832016-11-10 16:28:19 +0100925 prepare_frag(vport, skb, orig_network_offset,
926 ovs_key_mac_proto(key));
Joe Stringer7f8a4362015-08-26 11:31:48 -0700927 memset(&ovs_rt, 0, sizeof(ovs_rt));
928 dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
929 DST_OBSOLETE_NONE, DST_NOCOUNT);
930 ovs_rt.dst.dev = vport->dev;
931
932 orig_dst = skb->_skb_refdst;
933 skb_dst_set_noref(skb, &ovs_rt.dst);
934 IP6CB(skb)->frag_max_size = mru;
935
Eric W. Biederman7d8c6e32015-06-12 22:12:04 -0500936 v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700937 refdst_drop(orig_dst);
938 } else {
939 WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
Jiri Bence2d9d832016-11-10 16:28:19 +0100940 ovs_vport_name(vport), ntohs(key->eth.type), mru,
Joe Stringer7f8a4362015-08-26 11:31:48 -0700941 vport->dev->mtu);
Joe Stringerb8f22572015-10-06 10:59:57 -0700942 goto err;
Joe Stringer7f8a4362015-08-26 11:31:48 -0700943 }
Joe Stringerb8f22572015-10-06 10:59:57 -0700944
945 return;
946err:
947 kfree_skb(skb);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700948}
949
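/* Deliver 'skb' to an output port.  Truncate it first if a preceding trunc
 * action recorded a cutlen, send it directly when no MRU is set or it fits
 * within the MRU, fragment it when the MRU fits the device MTU, and drop it
 * otherwise.
 */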
950static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
951 struct sw_flow_key *key)
Jesse Grossccb13522011-10-25 19:26:31 -0700952{
Andy Zhou738967b2014-09-08 00:35:02 -0700953 struct vport *vport = ovs_vport_rcu(dp, out_port);
Jesse Grossccb13522011-10-25 19:26:31 -0700954
Joe Stringer7f8a4362015-08-26 11:31:48 -0700955 if (likely(vport)) {
956 u16 mru = OVS_CB(skb)->mru;
William Tuf2a4d082016-06-10 11:49:33 -0700957 u32 cutlen = OVS_CB(skb)->cutlen;
958
959 if (unlikely(cutlen > 0)) {
Jiri Bence2d9d832016-11-10 16:28:19 +0100960 if (skb->len - cutlen > ovs_mac_header_len(key))
William Tuf2a4d082016-06-10 11:49:33 -0700961 pskb_trim(skb, skb->len - cutlen);
962 else
Jiri Bence2d9d832016-11-10 16:28:19 +0100963 pskb_trim(skb, ovs_mac_header_len(key));
William Tuf2a4d082016-06-10 11:49:33 -0700964 }
Joe Stringer7f8a4362015-08-26 11:31:48 -0700965
Jiri Benc738314a2016-11-10 16:28:17 +0100966 if (likely(!mru ||
967 (skb->len <= mru + vport->dev->hard_header_len))) {
Jiri Bence2d9d832016-11-10 16:28:19 +0100968 ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
Joe Stringer7f8a4362015-08-26 11:31:48 -0700969 } else if (mru <= vport->dev->mtu) {
Eric W. Biedermanc559cd32015-09-14 20:10:28 -0500970 struct net *net = read_pnet(&dp->net);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700971
Jiri Bence2d9d832016-11-10 16:28:19 +0100972 ovs_fragment(net, vport, skb, mru, key);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700973 } else {
974 kfree_skb(skb);
975 }
976 } else {
Jesse Grossccb13522011-10-25 19:26:31 -0700977 kfree_skb(skb);
Joe Stringer7f8a4362015-08-26 11:31:48 -0700978 }
Jesse Grossccb13522011-10-25 19:26:31 -0700979}
980
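/* Build a dp_upcall_info from the nested OVS_USERSPACE_ATTR_* attributes
 * (netlink PID, userdata, optional egress tunnel info and the action list)
 * and send the packet to userspace via ovs_dp_upcall().
 */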
981static int output_userspace(struct datapath *dp, struct sk_buff *skb,
Neil McKeeccea7442015-05-26 20:59:43 -0700982 struct sw_flow_key *key, const struct nlattr *attr,
William Tuf2a4d082016-06-10 11:49:33 -0700983 const struct nlattr *actions, int actions_len,
984 uint32_t cutlen)
Jesse Grossccb13522011-10-25 19:26:31 -0700985{
986 struct dp_upcall_info upcall;
987 const struct nlattr *a;
988 int rem;
989
Neil McKeeccea7442015-05-26 20:59:43 -0700990 memset(&upcall, 0, sizeof(upcall));
Jesse Grossccb13522011-10-25 19:26:31 -0700991 upcall.cmd = OVS_PACKET_CMD_ACTION;
Joe Stringer7f8a4362015-08-26 11:31:48 -0700992 upcall.mru = OVS_CB(skb)->mru;
Jesse Grossccb13522011-10-25 19:26:31 -0700993
994 for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
995 a = nla_next(a, &rem)) {
996 switch (nla_type(a)) {
997 case OVS_USERSPACE_ATTR_USERDATA:
998 upcall.userdata = a;
999 break;
1000
1001 case OVS_USERSPACE_ATTR_PID:
Eric W. Biederman15e47302012-09-07 20:12:54 +00001002 upcall.portid = nla_get_u32(a);
Jesse Grossccb13522011-10-25 19:26:31 -07001003 break;
Wenyu Zhang8f0aad62014-11-06 06:51:24 -08001004
1005 case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
1007 struct vport *vport;
1008
1009 vport = ovs_vport_rcu(dp, nla_get_u32(a));
1010 if (vport) {
1011 int err;
1012
Pravin B Shelarfc4099f2015-10-22 18:17:16 -07001013 err = dev_fill_metadata_dst(vport->dev, skb);
1014 if (!err)
1015 upcall.egress_tun_info = skb_tunnel_info(skb);
Wenyu Zhang8f0aad62014-11-06 06:51:24 -08001016 }
Pravin B Shelar4c222792015-08-30 18:09:38 -07001017
Wenyu Zhang8f0aad62014-11-06 06:51:24 -08001018 break;
Jesse Grossccb13522011-10-25 19:26:31 -07001019 }
Wenyu Zhang8f0aad62014-11-06 06:51:24 -08001020
Neil McKeeccea7442015-05-26 20:59:43 -07001021 case OVS_USERSPACE_ATTR_ACTIONS: {
1022 /* Include actions. */
1023 upcall.actions = actions;
1024 upcall.actions_len = actions_len;
1025 break;
1026 }
1027
Wenyu Zhang8f0aad62014-11-06 06:51:24 -08001028 } /* End of switch. */
Jesse Grossccb13522011-10-25 19:26:31 -07001029 }
1030
William Tuf2a4d082016-06-10 11:49:33 -07001031 return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
Jesse Grossccb13522011-10-25 19:26:31 -07001032}
1033
/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
Jesse Grossccb13522011-10-25 19:26:31 -07001038static int sample(struct datapath *dp, struct sk_buff *skb,
Neil McKeeccea7442015-05-26 20:59:43 -07001039 struct sw_flow_key *key, const struct nlattr *attr,
andy zhou798c1662017-03-20 16:32:29 -07001040 bool last)
Jesse Grossccb13522011-10-25 19:26:31 -07001041{
andy zhou798c1662017-03-20 16:32:29 -07001042 struct nlattr *actions;
1043 struct nlattr *sample_arg;
andy zhou798c1662017-03-20 16:32:29 -07001044 int rem = nla_len(attr);
andy zhou798c1662017-03-20 16:32:29 -07001045 const struct sample_arg *arg;
andy zhoubef7f752017-03-20 16:32:30 -07001046 bool clone_flow_key;
Jesse Grossccb13522011-10-25 19:26:31 -07001047
	/* The first nested attribute is always 'OVS_SAMPLE_ATTR_ARG'. */
1049 sample_arg = nla_data(attr);
1050 arg = nla_data(sample_arg);
1051 actions = nla_next(sample_arg, &rem);
Wenyu Zhange05176a2015-08-05 00:30:47 -07001052
andy zhou798c1662017-03-20 16:32:29 -07001053 if ((arg->probability != U32_MAX) &&
1054 (!arg->probability || prandom_u32() > arg->probability)) {
1055 if (last)
1056 consume_skb(skb);
1057 return 0;
Jesse Grossccb13522011-10-25 19:26:31 -07001058 }
1059
andy zhoubef7f752017-03-20 16:32:30 -07001060 clone_flow_key = !arg->exec;
1061 return clone_execute(dp, skb, key, 0, actions, rem, last,
1062 clone_flow_key);
Andy Zhou971427f32014-09-15 19:37:25 -07001063}
1064
/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
1069static int clone(struct datapath *dp, struct sk_buff *skb,
1070 struct sw_flow_key *key, const struct nlattr *attr,
1071 bool last)
1072{
1073 struct nlattr *actions;
1074 struct nlattr *clone_arg;
1075 int rem = nla_len(attr);
1076 bool dont_clone_flow_key;
1077
	/* The first nested attribute is always 'OVS_CLONE_ATTR_ARG'. */
1079 clone_arg = nla_data(attr);
1080 dont_clone_flow_key = nla_get_u32(clone_arg);
1081 actions = nla_next(clone_arg, &rem);
1082
1083 return clone_execute(dp, skb, key, 0, actions, rem, last,
1084 !dont_clone_flow_key);
1085}
1086
Andy Zhou971427f32014-09-15 19:37:25 -07001087static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
1088 const struct nlattr *attr)
1089{
1090 struct ovs_action_hash *hash_act = nla_data(attr);
1091 u32 hash = 0;
1092
1093 /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
1094 hash = skb_get_hash(skb);
1095 hash = jhash_1word(hash, hash_act->hash_basis);
1096 if (!hash)
1097 hash = 0x1;
1098
1099 key->ovs_flow_hash = hash;
Jesse Grossccb13522011-10-25 19:26:31 -07001100}
1101
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001102static int execute_set_action(struct sk_buff *skb,
1103 struct sw_flow_key *flow_key,
1104 const struct nlattr *a)
1105{
1106 /* Only tunnel set execution is supported without a mask. */
1107 if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
Thomas Graf34ae9322015-07-21 10:44:03 +02001108 struct ovs_tunnel_info *tun = nla_data(a);
1109
1110 skb_dst_drop(skb);
1111 dst_hold((struct dst_entry *)tun->tun_dst);
1112 skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001113 return 0;
1114 }
1115
1116 return -EINVAL;
1117}
1118
1119/* Mask is at the midpoint of the data. */
1120#define get_mask(a, type) ((const type)nla_data(a) + 1)
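
/* An OVS_ACTION_ATTR_SET_MASKED payload holds the key structure immediately
 * followed by an equally sized mask, so adding 1 to the typed pointer lands
 * on the mask half of the attribute.
 */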
1121
1122static int execute_masked_set_action(struct sk_buff *skb,
1123 struct sw_flow_key *flow_key,
1124 const struct nlattr *a)
Jesse Grossccb13522011-10-25 19:26:31 -07001125{
1126 int err = 0;
1127
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001128 switch (nla_type(a)) {
Jesse Grossccb13522011-10-25 19:26:31 -07001129 case OVS_KEY_ATTR_PRIORITY:
Joe Stringerbe26b9a2015-08-26 11:31:45 -07001130 OVS_SET_MASKED(skb->priority, nla_get_u32(a),
1131 *get_mask(a, u32 *));
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001132 flow_key->phy.priority = skb->priority;
Jesse Grossccb13522011-10-25 19:26:31 -07001133 break;
1134
Ansis Atteka39c7caeb2012-11-26 11:24:11 -08001135 case OVS_KEY_ATTR_SKB_MARK:
Joe Stringerbe26b9a2015-08-26 11:31:45 -07001136 OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001137 flow_key->phy.skb_mark = skb->mark;
Ansis Atteka39c7caeb2012-11-26 11:24:11 -08001138 break;
1139
Jesse Grossf0b128c2014-10-03 15:35:31 -07001140 case OVS_KEY_ATTR_TUNNEL_INFO:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001141 /* Masked data not supported for tunnel. */
1142 err = -EINVAL;
Pravin B Shelar7d5437c2013-06-17 17:50:18 -07001143 break;
1144
Jesse Grossccb13522011-10-25 19:26:31 -07001145 case OVS_KEY_ATTR_ETHERNET:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001146 err = set_eth_addr(skb, flow_key, nla_data(a),
1147 get_mask(a, struct ovs_key_ethernet *));
Jesse Grossccb13522011-10-25 19:26:31 -07001148 break;
1149
Yi Yangb2d0f5d2017-11-07 21:07:02 +08001150 case OVS_KEY_ATTR_NSH:
1151 err = set_nsh(skb, flow_key, a);
1152 break;
1153
Jesse Grossccb13522011-10-25 19:26:31 -07001154 case OVS_KEY_ATTR_IPV4:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001155 err = set_ipv4(skb, flow_key, nla_data(a),
1156 get_mask(a, struct ovs_key_ipv4 *));
Jesse Grossccb13522011-10-25 19:26:31 -07001157 break;
1158
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -08001159 case OVS_KEY_ATTR_IPV6:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001160 err = set_ipv6(skb, flow_key, nla_data(a),
1161 get_mask(a, struct ovs_key_ipv6 *));
Ansis Atteka3fdbd1c2012-11-13 15:44:14 -08001162 break;
1163
Jesse Grossccb13522011-10-25 19:26:31 -07001164 case OVS_KEY_ATTR_TCP:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001165 err = set_tcp(skb, flow_key, nla_data(a),
1166 get_mask(a, struct ovs_key_tcp *));
Jesse Grossccb13522011-10-25 19:26:31 -07001167 break;
1168
1169 case OVS_KEY_ATTR_UDP:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001170 err = set_udp(skb, flow_key, nla_data(a),
1171 get_mask(a, struct ovs_key_udp *));
Jesse Grossccb13522011-10-25 19:26:31 -07001172 break;
Joe Stringera175a722013-08-22 12:30:48 -07001173
1174 case OVS_KEY_ATTR_SCTP:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001175 err = set_sctp(skb, flow_key, nla_data(a),
1176 get_mask(a, struct ovs_key_sctp *));
Joe Stringera175a722013-08-22 12:30:48 -07001177 break;
Simon Horman25cd9ba2014-10-06 05:05:13 -07001178
1179 case OVS_KEY_ATTR_MPLS:
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001180 err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
1181 __be32 *));
Simon Horman25cd9ba2014-10-06 05:05:13 -07001182 break;
Joe Stringer7f8a4362015-08-26 11:31:48 -07001183
1184 case OVS_KEY_ATTR_CT_STATE:
1185 case OVS_KEY_ATTR_CT_ZONE:
Joe Stringer182e3042015-08-26 11:31:49 -07001186 case OVS_KEY_ATTR_CT_MARK:
Joe Stringer33db4122015-10-01 15:00:37 -07001187 case OVS_KEY_ATTR_CT_LABELS:
Jarno Rajahalme9dd7f892017-02-09 11:21:59 -08001188 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
1189 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
Joe Stringer7f8a4362015-08-26 11:31:48 -07001190 err = -EINVAL;
1191 break;
Jesse Grossccb13522011-10-25 19:26:31 -07001192 }
1193
1194 return err;
1195}
1196
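/* If an earlier action (e.g. an MPLS or Ethernet push/pop) invalidated the
 * flow key, refresh it with ovs_flow_key_update() before the packet
 * re-enters processing through clone_execute() with the new recirc_id.
 */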
Andy Zhou971427f32014-09-15 19:37:25 -07001197static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
1198 struct sw_flow_key *key,
andy zhoubef7f752017-03-20 16:32:30 -07001199 const struct nlattr *a, bool last)
Andy Zhou971427f32014-09-15 19:37:25 -07001200{
andy zhoubef7f752017-03-20 16:32:30 -07001201 u32 recirc_id;
Andy Zhou971427f32014-09-15 19:37:25 -07001202
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001203 if (!is_flow_key_valid(key)) {
1204 int err;
1205
1206 err = ovs_flow_key_update(skb, key);
1207 if (err)
1208 return err;
1209 }
1210 BUG_ON(!is_flow_key_valid(key));
Andy Zhou971427f32014-09-15 19:37:25 -07001211
andy zhoubef7f752017-03-20 16:32:30 -07001212 recirc_id = nla_get_u32(a);
1213 return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
Andy Zhou971427f32014-09-15 19:37:25 -07001214}
1215
Jesse Grossccb13522011-10-25 19:26:31 -07001216/* Execute a list of actions against 'skb'. */
1217static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
Pravin B Shelar2ff3e4e2014-09-15 19:15:28 -07001218 struct sw_flow_key *key,
Simon Horman651887b2014-07-21 15:12:34 -07001219 const struct nlattr *attr, int len)
Jesse Grossccb13522011-10-25 19:26:31 -07001220{
Jesse Grossccb13522011-10-25 19:26:31 -07001221 const struct nlattr *a;
1222 int rem;
1223
1224 for (a = attr, rem = len; rem > 0;
1225 a = nla_next(a, &rem)) {
1226 int err = 0;
1227
Jesse Grossccb13522011-10-25 19:26:31 -07001228 switch (nla_type(a)) {
andy zhou5b8784a2017-01-27 13:45:28 -08001229 case OVS_ACTION_ATTR_OUTPUT: {
1230 int port = nla_get_u32(a);
1231 struct sk_buff *clone;
1232
			/* Every output action needs a separate clone
			 * of 'skb'.  If the output action is the last
			 * action, cloning can be avoided.
			 */
1237 if (nla_is_last(a, rem)) {
1238 do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
1241 return 0;
1242 }
1243
1244 clone = skb_clone(skb, GFP_ATOMIC);
1245 if (clone)
1246 do_output(dp, clone, port, key);
1247 OVS_CB(skb)->cutlen = 0;
Jesse Grossccb13522011-10-25 19:26:31 -07001248 break;
andy zhou5b8784a2017-01-27 13:45:28 -08001249 }
Jesse Grossccb13522011-10-25 19:26:31 -07001250
William Tuf2a4d082016-06-10 11:49:33 -07001251 case OVS_ACTION_ATTR_TRUNC: {
1252 struct ovs_action_trunc *trunc = nla_data(a);
1253
1254 if (skb->len > trunc->max_len)
1255 OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
1256 break;
1257 }
1258
Jesse Grossccb13522011-10-25 19:26:31 -07001259 case OVS_ACTION_ATTR_USERSPACE:
William Tuf2a4d082016-06-10 11:49:33 -07001260 output_userspace(dp, skb, key, a, attr,
1261 len, OVS_CB(skb)->cutlen);
1262 OVS_CB(skb)->cutlen = 0;
Jesse Grossccb13522011-10-25 19:26:31 -07001263 break;
1264
Andy Zhou971427f32014-09-15 19:37:25 -07001265 case OVS_ACTION_ATTR_HASH:
1266 execute_hash(skb, key, a);
1267 break;
1268
Simon Horman25cd9ba2014-10-06 05:05:13 -07001269 case OVS_ACTION_ATTR_PUSH_MPLS:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001270 err = push_mpls(skb, key, nla_data(a));
Simon Horman25cd9ba2014-10-06 05:05:13 -07001271 break;
1272
1273 case OVS_ACTION_ATTR_POP_MPLS:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001274 err = pop_mpls(skb, key, nla_get_be16(a));
Simon Horman25cd9ba2014-10-06 05:05:13 -07001275 break;
1276
Jesse Grossccb13522011-10-25 19:26:31 -07001277 case OVS_ACTION_ATTR_PUSH_VLAN:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001278 err = push_vlan(skb, key, nla_data(a));
Jesse Grossccb13522011-10-25 19:26:31 -07001279 break;
1280
1281 case OVS_ACTION_ATTR_POP_VLAN:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001282 err = pop_vlan(skb, key);
Jesse Grossccb13522011-10-25 19:26:31 -07001283 break;
1284
andy zhoubef7f752017-03-20 16:32:30 -07001285 case OVS_ACTION_ATTR_RECIRC: {
1286 bool last = nla_is_last(a, rem);
1287
1288 err = execute_recirc(dp, skb, key, a, last);
1289 if (last) {
Andy Zhou971427f32014-09-15 19:37:25 -07001290 /* If this is the last action, the skb has
1291 * been consumed or freed.
1292 * Return immediately.
1293 */
1294 return err;
1295 }
1296 break;
andy zhoubef7f752017-03-20 16:32:30 -07001297 }
Andy Zhou971427f32014-09-15 19:37:25 -07001298
Jesse Grossccb13522011-10-25 19:26:31 -07001299 case OVS_ACTION_ATTR_SET:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001300 err = execute_set_action(skb, key, nla_data(a));
Jesse Grossccb13522011-10-25 19:26:31 -07001301 break;
1302
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -08001303 case OVS_ACTION_ATTR_SET_MASKED:
1304 case OVS_ACTION_ATTR_SET_TO_MASKED:
1305 err = execute_masked_set_action(skb, key, nla_data(a));
1306 break;
1307
andy zhou798c1662017-03-20 16:32:29 -07001308 case OVS_ACTION_ATTR_SAMPLE: {
1309 bool last = nla_is_last(a, rem);
1310
1311 err = sample(dp, skb, key, a, last);
1312 if (last)
1313 return err;
1314
Jesse Grossccb13522011-10-25 19:26:31 -07001315 break;
andy zhou798c1662017-03-20 16:32:29 -07001316 }
Joe Stringer7f8a4362015-08-26 11:31:48 -07001317
		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

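		/* A non-zero return from ovs_meter_execute() means a meter
		 * band fired and the packet must be dropped; the drop still
		 * counts as successful processing of the action list.
		 */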
		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet. The execution does not
 * affect the original 'skb' or the original 'key'.
 *
 * The execution may be deferred if the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so it can be used directly.
	 * Otherwise, try to clone the key from the next recursion level
	 * of 'flow_keys'. If the clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer the actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

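/* Drain the per-CPU FIFO of recirc and sample actions that were deferred
 * because the per-CPU 'flow_keys' stack was exhausted.  Called only from
 * the outermost ovs_execute_actions() invocation.
 */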
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset the FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

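	/* Deferred actions are only run once the outermost invocation has
	 * finished its own action list; nested invocations just queue onto
	 * the same per-CPU FIFO.
	 */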
	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

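/* Allocate the per-CPU deferred-action FIFOs and the per-CPU 'flow_keys'
 * stack used by clone_execute(); called once when the openvswitch module
 * initializes its datapath.
 */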
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}