/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
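/* validate_xmit_xfrm - resolve ESP offload for a packet about to be sent.
 *
 * Called from validate_xmit_skb() just before the skb is handed to the
 * driver. Packets that cannot be handled by the transmitting device are
 * transformed in software (GSO packets are segmented first), and *again
 * is set when the packet has to wait behind the per-cpu xfrm backlog.
 */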
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	x = skb->sp->xvec[skb->sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* If the per-cpu backlog is not empty, tell the caller to try
	 * again later so this packet does not overtake backlogged ones.
	 */
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	if (!skb->next) {
		x->outer_mode->xmit(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	/* We have a segment list: do the ESP handling for each segment
	 * separately and link the results back together.
	 */
	skb2 = skb;

	do {
		struct sk_buff *nskb = skb2->next;
		skb2->next = NULL;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;

			if (!skb)
				return NULL;

			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

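/* xfrm_dev_state_add - bind an xfrm state to an offload-capable device.
 *
 * Resolves the netdevice from xuo->ifindex, or via a route lookup when
 * no ifindex was given, and asks the device's xdo_dev_state_add()
 * callback to install the state in hardware. Returns 0 and leaves the
 * state in software if the device has no xfrm offload support.
 */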
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation, TFC padding and ESN. */
	if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family, x->props.output_mark);
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		dev_put(dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

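/* xfrm_dev_offload_ok - check if a packet can be sent with hardware offload.
 *
 * Returns true if the state is offloaded to the device the packet will
 * leave through, the packet fits the MTU reported by the type's
 * get_mtu() callback, and the driver (if it implements
 * xdo_dev_offload_ok) agrees to handle the packet.
 */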
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
	    (!xdst->child->xfrm && x->type->get_mtu)) {
		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

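/* xfrm_dev_resume - resume transmission of an skb after async ESP processing.
 *
 * Tries to transmit the skb directly; if the device queue is frozen,
 * stopped or busy, the skb is put on the per-cpu xfrm backlog and the
 * TX softirq is raised so it can be retried from xfrm_dev_backlog().
 */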
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

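/* xfrm_dev_backlog - retransmit skbs backlogged by xfrm_dev_resume().
 *
 * Runs from the TX softirq: splices the per-cpu backlog onto a private
 * list and feeds each skb back into xfrm_dev_resume().
 */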
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

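/* xfrm_api_check - validate a device's ESP offload feature flags.
 *
 * A device may advertise NETIF_F_HW_ESP_TX_CSUM only together with
 * NETIF_F_HW_ESP, and NETIF_F_HW_ESP only if it also provides the
 * mandatory xfrmdev_ops callbacks. Without CONFIG_XFRM_OFFLOAD, any
 * ESP feature flag is rejected.
 */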
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

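/* A minimal sketch of how a driver would advertise ESP offload and
 * satisfy the checks above (illustrative only; the foo_* names are
 * hypothetical driver callbacks, not part of this file):
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xfrm_add_state,
 *		.xdo_dev_state_delete	= foo_xfrm_del_state,
 *		.xdo_dev_offload_ok	= foo_xfrm_offload_ok,
 *	};
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
 */
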
static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_unregister(struct net_device *dev)
{
	xfrm_policy_cache_flush();
	return NOTIFY_DONE;
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

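/* NETDEV_DOWN: remove all states offloaded to this device from the
 * hardware and flush the per-cpu policy cache.
 */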
static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	xfrm_policy_cache_flush();
	return NOTIFY_DONE;
}

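/* Netdevice notifier: keeps offloaded xfrm state and the feature-flag
 * checks in sync with device register/unregister, feature changes and
 * link-down events.
 */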
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_UNREGISTER:
		return xfrm_dev_unregister(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

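/* Called during xfrm initialization to wire up the netdevice notifier. */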
void __net_init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}