// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

static struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}

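/* True once the rx ring and the umem fill ring are both set up, i.e. the
 * socket is usable as an XSKMAP redirect target. The READ_ONCE() calls
 * pair with the smp_wmb() barriers in xsk_init_queue() and XDP_UMEM_REG.
 */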
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

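/* Copy-mode receive: peek a free chunk address from the fill queue, copy
 * the packet into the umem at that address (past the configured headroom)
 * and post a descriptor on the rx ring. Frames larger than a chunk are
 * dropped and counted in rx_dropped.
 */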
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *buffer;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

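/* Zero-copy receive: the frame already lives in umem memory, so only a
 * descriptor referencing the buffer handle is posted on the rx ring.
 */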
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err) {
		xdp_return_buff(xdp);
		xs->rx_dropped++;
	}

	return err;
}

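/* Entry point for packets redirected to this socket from an XDP program.
 * The packet must arrive on the device and queue the socket is bound to;
 * anything else is rejected.
 */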
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

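/* Receive path for generic (skb-mode) XDP: same copy scheme as __xsk_rcv(),
 * but the rx ring is flushed and sk_data_ready signalled right away rather
 * than from a later xsk_flush() call.
 */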
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

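/* Driver-facing API for zero-copy transmit: fetch the next tx descriptor
 * from any socket sharing this umem. A completion ring entry is reserved
 * lazily up front so that the completion can always be posted later.
 */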
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

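/* skb destructor for copy-mode tx: posts the buffer address on the
 * completion ring once the skb is freed. This can run in any context,
 * hence the irqsave lock around the single-producer ring.
 */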
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

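/* Copy-mode transmit: drain up to TX_BATCH_SIZE descriptors from the tx
 * ring, copy each frame into a freshly allocated skb and send it with
 * dev_direct_xmit() on the bound queue. Completions are produced from
 * xsk_destruct_skb() when the skb is freed.
 */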
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		if (xskq_reserve_addr(xs->umem->cq))
			goto out;

		len = desc.len;
		if (unlikely(len > xs->dev->mtu)) {
			err = -EMSGSIZE;
			goto out;
		}

		if (xs->queue_id >= xs->dev->real_num_tx_queues)
			goto out;

		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

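/* Readable when the rx ring has descriptors to consume, writable while
 * the tx ring still has room for new descriptors.
 */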
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

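/* All four rings are sized from setsockopt(); the entry count must be a
 * non-zero power of two, and a given ring can only be created once.
 */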
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	*queue = q;
	return 0;
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	if (xs->dev) {
		/* Wait for driver to stop using the xdp socket. */
		synchronize_net();
		dev_put(xs->dev);
		xs->dev = NULL;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

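/* Bind the socket to a <device, queue_id> pair, either with its own umem
 * or, with XDP_SHARED_UMEM, inheriting the umem of another socket bound
 * to the same device and queue. A minimal userspace call might look like
 * this (sketch only; "eth0" and queue 0 are placeholders):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */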
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if ((xs->rx && qid >= dev->real_num_rx_queues) ||
	    (xs->tx && qid >= dev->real_num_tx_queues)) {
		err = -EINVAL;
		goto out_unlock;
	}

	flags = sxdp->sxdp_flags;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, &xs->umem->props);
		xskq_set_umem(xs->umem->cq, &xs->umem->props);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, &xs->umem->props);
	xskq_set_umem(xs->tx, &xs->umem->props);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}

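/* Ring setup from userspace. XDP_UMEM_REG must precede the fill and
 * completion rings, since those hang off the umem. A typical sequence
 * (sketch; mr is a populated struct xdp_umem_reg, NUM_DESCS a
 * power-of-two entry count):
 *
 *	int entries = NUM_DESCS;
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 */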
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

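/* XDP_STATISTICS exposes the drop and invalid-descriptor counters;
 * XDP_MMAP_OFFSETS tells userspace where the producer/consumer pointers
 * and descriptor arrays live inside each mmap()ed ring.
 */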
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc = offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

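/* Maps one of the four rings into userspace. The ring is selected by the
 * mmap() page offset (XDP_PGOFF_RX_RING and friends); the field offsets
 * inside each ring are what XDP_MMAP_OFFSETS reports above.
 */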
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xdp_del_sk_umem(xs->umem, xs);
	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	xs = xdp_sk(sk);
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);