// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets provide a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

static struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}

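/* A socket is only usable as a redirect target from an XSKMAP once its
 * rx ring, umem and fill ring have all been created.
 */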
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

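/* Copy-mode receive: reserve a chunk from the fill ring, copy the frame
 * into the umem (past the configured headroom) and publish a descriptor
 * on the rx ring. The xdp_buff is returned to the driver on success;
 * otherwise the frame is counted as dropped.
 */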
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *buffer;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

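/* Zero-copy receive: the frame already sits in the umem, so only a
 * descriptor for xdp->handle needs to be produced on the rx ring.
 */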
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

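/* Receive path for generic XDP (SKB mode): same copy scheme as
 * __xsk_rcv(), but the rx ring is flushed and the receiver woken up
 * immediately via xsk_flush() instead of leaving that to a later
 * xsk_flush() call.
 */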
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

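/* Called by zero-copy drivers when transmission has completed: makes
 * nb_entries previously reserved completion entries visible to userspace.
 */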
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

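/* Hand the next tx descriptor from any socket sharing this umem to a
 * zero-copy driver. A completion entry is reserved up front so that the
 * completion ring cannot overflow once the frame is in flight.
 */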
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

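/* Zero-copy transmit: ask the driver to start consuming the tx ring on
 * this queue; descriptors are then picked up via xsk_umem_consume_tx().
 */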
static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

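/* skb destructor for copy-mode tx: once the stack releases the skb, the
 * originating umem address is posted on the completion ring so userspace
 * can reuse the buffer.
 */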
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

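/* Copy-mode transmit: drain up to TX_BATCH_SIZE descriptors from the tx
 * ring, copy each frame into a freshly allocated skb and hand it directly
 * to the driver with dev_direct_xmit(). Completions are generated from
 * xsk_destruct_skb().
 */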
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

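/* POLLIN while the rx ring holds descriptors, POLLOUT while the tx ring
 * has room.
 */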
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

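/* Allocate one of the four rings and publish it. The write barrier pairs
 * with the smp_rmb() in xsk_mmap() so a mapped ring is always fully set up.
 */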
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	if (xs->dev) {
		struct net_device *dev = xs->dev;

		/* Wait for driver to stop using the xdp socket. */
		xdp_del_sk_umem(xs->umem, xs);
		xs->dev = NULL;
		synchronize_net();
		dev_put(dev);
	}

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

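/* Bind the socket to a device and queue id. The socket either brings its
 * own umem (which is then assigned to the device, possibly in zero-copy
 * mode) or, with XDP_SHARED_UMEM, inherits the umem of an already bound
 * socket on the same device and queue.
 */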
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if ((xs->rx && qid >= dev->real_num_rx_queues) ||
	    (xs->tx && qid >= dev->real_num_tx_queues)) {
		err = -EINVAL;
		goto out_unlock;
	}

	flags = sxdp->sxdp_flags;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, &xs->umem->props);
		xskq_set_umem(xs->umem->cq, &xs->umem->props);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, &xs->umem->props);
	xskq_set_umem(xs->tx, &xs->umem->props);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc = offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

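/* Map one of the rings into userspace; vma->vm_pgoff encodes which ring
 * (rx, tx, fill or completion) is requested.
 */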
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);