// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *            Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

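/* Orientation, as implemented below: an AF_XDP socket (struct xdp_sock)
 * owns an RX and/or a TX descriptor ring and is backed by a umem, a
 * region of user memory registered with XDP_UMEM_REG and carved into
 * fixed-size chunks. The umem carries two more rings: a fill queue (fq),
 * on which userspace hands chunks to the kernel for RX, and a completion
 * queue (cq), on which the kernel returns chunks once TX is done.
 * Several sockets may share one umem (XDP_SHARED_UMEM), which is why the
 * TX helpers walk umem->xsk_list under RCU.
 */
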
static struct xdp_sock *xdp_sk(struct sock *sk)
{
        return (struct xdp_sock *)sk;
}

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
        return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
                READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
        return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
        xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

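/* Copy-mode RX: reserve a chunk address from the fill queue, copy the
 * packet into the umem behind the configured headroom and publish an RX
 * descriptor. The fill entry is consumed and the xdp_buff returned only
 * after the descriptor has been queued, so a full RX ring drops the
 * packet without leaking the fill-queue entry.
 */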
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        void *buffer;
        u64 addr;
        int err;

        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        addr += xs->umem->headroom;

        buffer = xdp_umem_get_data(xs->umem, addr);
        memcpy(buffer, xdp->data, len);
        err = xskq_produce_batch_desc(xs->rx, addr, len);
        if (!err) {
                xskq_discard_addr(xs->umem->fq);
                xdp_return_buff(xdp);
                return 0;
        }

        xs->rx_dropped++;
        return err;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

        if (err) {
                xdp_return_buff(xdp);
                xs->rx_dropped++;
        }

        return err;
}

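/* Entry point from the XDP_REDIRECT path, called in the driver's NAPI
 * context. Traffic from a device or queue other than the one the socket
 * is bound to is rejected. Zero-copy buffers (MEM_TYPE_ZERO_COPY) only
 * need their umem handle posted to the RX ring; anything else is copied
 * into the umem first.
 */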
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len;

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;

        len = xdp->data_end - xdp->data;

        return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
                __xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
        xskq_produce_flush_desc(xs->rx);
        xs->sk.sk_data_ready(&xs->sk);
}

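/* Generic (SKB mode) RX, used when the driver lacks native XDP support.
 * Same copy scheme as __xsk_rcv(), but there is no NAPI flush point in
 * this mode, so the descriptor is flushed and the socket woken up
 * immediately.
 */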
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len = xdp->data_end - xdp->data;
        void *buffer;
        u64 addr;
        int err;

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;

        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        addr += xs->umem->headroom;

        buffer = xdp_umem_get_data(xs->umem, addr);
        memcpy(buffer, xdp->data, len);
        err = xskq_produce_batch_desc(xs->rx, addr, len);
        if (!err) {
                xskq_discard_addr(xs->umem->fq);
                xsk_flush(xs);
                return 0;
        }

        xs->rx_dropped++;
        return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
        xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                xs->sk.sk_write_space(&xs->sk);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

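/* Driver-facing zero-copy TX: dequeue one descriptor from any socket
 * sharing this umem and hand its DMA address and length to the caller.
 * A completion entry is reserved up front (lazily published; the driver
 * flushes it via xsk_umem_complete_tx() above), so the chunk can always
 * be returned to userspace once the hardware is done with it.
 */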
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
        struct xdp_desc desc;
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                if (!xskq_peek_desc(xs->tx, &desc))
                        continue;

                if (xskq_produce_addr_lazy(umem->cq, desc.addr))
                        goto out;

                *dma = xdp_umem_get_dma(umem, desc.addr);
                *len = desc.len;

                xskq_discard_desc(xs->tx);
                rcu_read_unlock();
                return true;
        }

out:
        rcu_read_unlock();
        return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

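/* Zero-copy TX is asynchronous: sendmsg() merely kicks the driver, which
 * then pulls descriptors off the TX ring via xsk_umem_consume_tx() above.
 */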
static int xsk_zc_xmit(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev = xs->dev;

        return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

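/* Copy-mode TX completion: the chunk address travels in destructor_arg
 * and is posted to the completion queue when the skb is freed, i.e. when
 * the stack is done with the frame.
 */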
static void xsk_destruct_skb(struct sk_buff *skb)
{
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);

        WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));

        sock_wfree(skb);
}

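/* Copy-mode TX: copy each TX descriptor's frame into a freshly allocated
 * skb and push it straight to the bound queue with dev_direct_xmit().
 * A completion entry is reserved before the send so that
 * xsk_destruct_skb() cannot fail to return the chunk. At most
 * TX_BATCH_SIZE frames are sent per call to bound the time spent under
 * xs->mutex.
 */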
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                            size_t total_len)
{
        u32 max_batch = TX_BATCH_SIZE;
        struct xdp_sock *xs = xdp_sk(sk);
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
        int err = 0;

        if (unlikely(!xs->tx))
                return -ENOBUFS;

        mutex_lock(&xs->mutex);

        while (xskq_peek_desc(xs->tx, &desc)) {
                char *buffer;
                u64 addr;
                u32 len;

                if (max_batch-- == 0) {
                        err = -EAGAIN;
                        goto out;
                }

                if (xskq_reserve_addr(xs->umem->cq)) {
                        err = -EAGAIN;
                        goto out;
                }

                len = desc.len;
                if (unlikely(len > xs->dev->mtu)) {
                        err = -EMSGSIZE;
                        goto out;
                }

                if (xs->queue_id >= xs->dev->real_num_tx_queues) {
                        err = -ENXIO;
                        goto out;
                }

                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
                        err = -EAGAIN;
                        goto out;
                }

                skb_put(skb, len);
                addr = desc.addr;
                buffer = xdp_umem_get_data(xs->umem, addr);
                err = skb_store_bits(skb, 0, buffer, len);
                if (unlikely(err)) {
                        kfree_skb(skb);
                        goto out;
                }

                skb->dev = xs->dev;
                skb->priority = sk->sk_priority;
                skb->mark = sk->sk_mark;
                skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
                skb->destructor = xsk_destruct_skb;

                err = dev_direct_xmit(skb, xs->queue_id);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
                        err = -EAGAIN;
                        /* SKB consumed by dev_direct_xmit() */
                        goto out;
                }

                sent_frame = true;
                xskq_discard_desc(xs->tx);
        }

out:
        if (sent_frame)
                sk->sk_write_space(sk);

        mutex_unlock(&xs->mutex);
        return err;
}

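/* Only non-blocking operation is supported here: callers must pass
 * MSG_DONTWAIT, otherwise -EOPNOTSUPP is returned.
 */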
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);

        if (unlikely(!xs->dev))
                return -ENXIO;
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
        if (need_wait)
                return -EOPNOTSUPP;

        return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events)
{
        __poll_t mask = datagram_poll_mask(sock, events);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);

        if (xs->rx && !xskq_empty_desc(xs->rx))
                mask |= POLLIN | POLLRDNORM;
        if (xs->tx && !xskq_full_desc(xs->tx))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
                          bool umem_queue)
{
        struct xsk_queue *q;

        if (entries == 0 || *queue || !is_power_of_2(entries))
                return -EINVAL;

        q = xskq_create(entries, umem_queue);
        if (!q)
                return -ENOMEM;

        /* Make sure queue is ready before it can be seen by others */
        smp_wmb();
        *queue = q;
        return 0;
}

static int xsk_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net *net;

        if (!sk)
                return 0;

        net = sock_net(sk);

        local_bh_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
        local_bh_enable();

        if (xs->dev) {
                /* Wait for driver to stop using the xdp socket. */
                synchronize_net();
                dev_put(xs->dev);
                xs->dev = NULL;
        }

        sock_orphan(sk);
        sock->sk = NULL;

        sk_refcnt_debug_release(sk);
        sock_put(sk);

        return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
        struct socket *sock;
        int err;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return ERR_PTR(-ENOPROTOOPT);
        }

        return sock;
}

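/* bind() ties the socket to one <device, queue_id> pair and is the final
 * setup step: the umem and at least one of the RX/TX rings must already
 * have been created via setsockopt() below. With XDP_SHARED_UMEM the
 * socket instead inherits the umem of an already-bound socket on the
 * same device and queue, and may not itself request XDP_COPY or
 * XDP_ZEROCOPY.
 *
 * A minimal userspace setup sketch, assuming the uapi definitions in
 * <linux/if_xdp.h> at this kernel version. bufs, NUM_CHUNKS, CHUNK_SIZE
 * and ring_sz are placeholders, error handling is omitted, and ring
 * sizes must be powers of two:
 *
 *      int fd = socket(AF_XDP, SOCK_RAW, 0);
 *      int ring_sz = 1024;
 *
 *      struct xdp_umem_reg mr = {
 *              .addr = (__u64)(unsigned long)bufs, // page-aligned area
 *              .len = NUM_CHUNKS * CHUNK_SIZE,
 *              .chunk_size = CHUNK_SIZE,
 *              .headroom = 0,
 *      };
 *      setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *      setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
 *      setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
 *      // mmap() the rings; see xsk_mmap() below
 *
 *      struct sockaddr_xdp sxdp = {
 *              .sxdp_family = AF_XDP,
 *              .sxdp_ifindex = ifindex,
 *              .sxdp_queue_id = 0,
 *      };
 *      bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */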
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        u32 flags, qid;
        int err = 0;

        if (addr_len < sizeof(struct sockaddr_xdp))
                return -EINVAL;
        if (sxdp->sxdp_family != AF_XDP)
                return -EINVAL;

        mutex_lock(&xs->mutex);
        if (xs->dev) {
                err = -EBUSY;
                goto out_release;
        }

        dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
        if (!dev) {
                err = -ENODEV;
                goto out_release;
        }

        if (!xs->rx && !xs->tx) {
                err = -EINVAL;
                goto out_unlock;
        }

        qid = sxdp->sxdp_queue_id;

        if ((xs->rx && qid >= dev->real_num_rx_queues) ||
            (xs->tx && qid >= dev->real_num_tx_queues)) {
                err = -EINVAL;
                goto out_unlock;
        }

        flags = sxdp->sxdp_flags;

        if (flags & XDP_SHARED_UMEM) {
                struct xdp_sock *umem_xs;
                struct socket *sock;

                if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
                        /* Cannot specify flags for shared sockets. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                if (xs->umem) {
                        /* We already have our own. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
                if (IS_ERR(sock)) {
                        err = PTR_ERR(sock);
                        goto out_unlock;
                }

                umem_xs = xdp_sk(sock->sk);
                if (!umem_xs->umem) {
                        /* No umem to inherit. */
                        err = -EBADF;
                        sockfd_put(sock);
                        goto out_unlock;
                } else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
                        err = -EINVAL;
                        sockfd_put(sock);
                        goto out_unlock;
                }

                xdp_get_umem(umem_xs->umem);
                xs->umem = umem_xs->umem;
                sockfd_put(sock);
        } else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
                err = -EINVAL;
                goto out_unlock;
        } else {
                /* This xsk has its own umem. */
                xskq_set_umem(xs->umem->fq, &xs->umem->props);
                xskq_set_umem(xs->umem->cq, &xs->umem->props);

                err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
                if (err)
                        goto out_unlock;
        }

        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
        xskq_set_umem(xs->rx, &xs->umem->props);
        xskq_set_umem(xs->tx, &xs->umem->props);
        xdp_add_sk_umem(xs->umem, xs);

out_unlock:
        if (err)
                dev_put(dev);
out_release:
        mutex_unlock(&xs->mutex);
        return err;
}

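/* Socket options drive all setup and impose an ordering: XDP_UMEM_REG
 * must precede XDP_UMEM_FILL_RING and XDP_UMEM_COMPLETION_RING, since
 * those rings hang off the umem. Every ring size must be a power of two
 * (enforced in xsk_init_queue() above).
 */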
static int xsk_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int err;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        switch (optname) {
        case XDP_RX_RING:
        case XDP_TX_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (optlen < sizeof(entries))
                        return -EINVAL;
                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
                err = xsk_init_queue(entries, q, false);
                mutex_unlock(&xs->mutex);
                return err;
        }
        case XDP_UMEM_REG:
        {
                struct xdp_umem_reg mr;
                struct xdp_umem *umem;

                if (copy_from_user(&mr, optval, sizeof(mr)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                umem = xdp_umem_create(&mr);
                if (IS_ERR(umem)) {
                        mutex_unlock(&xs->mutex);
                        return PTR_ERR(umem);
                }

                /* Make sure umem is ready before it can be seen by others */
                smp_wmb();
                xs->umem = umem;
                mutex_unlock(&xs->mutex);
                return 0;
        }
        case XDP_UMEM_FILL_RING:
        case XDP_UMEM_COMPLETION_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (!xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EINVAL;
                }

                q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
                        &xs->umem->cq;
                err = xsk_init_queue(entries, q, true);
                mutex_unlock(&xs->mutex);
                return err;
        }
        default:
                break;
        }

        return -ENOPROTOOPT;
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int len;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case XDP_STATISTICS:
        {
                struct xdp_statistics stats;

                if (len < sizeof(stats))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                stats.rx_dropped = xs->rx_dropped;
                stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
                stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
                mutex_unlock(&xs->mutex);

                if (copy_to_user(optval, &stats, sizeof(stats)))
                        return -EFAULT;
                if (put_user(sizeof(stats), optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_MMAP_OFFSETS:
        {
                struct xdp_mmap_offsets off;

                if (len < sizeof(off))
                        return -EINVAL;

                off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
                off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
                off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
                off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
                off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
                off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

                off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
                off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
                off.fr.desc = offsetof(struct xdp_umem_ring, desc);
                off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
                off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
                off.cr.desc = offsetof(struct xdp_umem_ring, desc);

                len = sizeof(off);
                if (copy_to_user(optval, &off, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        default:
                break;
        }

        return -EOPNOTSUPP;
}

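/* mmap() exposes the rings to userspace; the page offset selects which
 * ring is mapped, and the matching setsockopt() must have been issued
 * first or there is no queue to map. An illustrative userspace call,
 * with offsets from <linux/if_xdp.h> (rx_map_len is a placeholder,
 * normally derived from the XDP_MMAP_OFFSETS getsockopt above):
 *
 *      rx = mmap(NULL, rx_map_len, PROT_READ | PROT_WRITE,
 *                MAP_SHARED | MAP_POPULATE, fd, XDP_PGOFF_RX_RING);
 */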
static int xsk_mmap(struct file *file, struct socket *sock,
                    struct vm_area_struct *vma)
{
        loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
        struct xdp_sock *xs = xdp_sk(sock->sk);
        struct xsk_queue *q = NULL;
        struct xdp_umem *umem;
        unsigned long pfn;
        struct page *qpg;

        if (offset == XDP_PGOFF_RX_RING) {
                q = READ_ONCE(xs->rx);
        } else if (offset == XDP_PGOFF_TX_RING) {
                q = READ_ONCE(xs->tx);
        } else {
                umem = READ_ONCE(xs->umem);
                if (!umem)
                        return -EINVAL;

                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = READ_ONCE(umem->fq);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
                        q = READ_ONCE(umem->cq);
        }

        if (!q)
                return -EINVAL;

        qpg = virt_to_head_page(q->ring);
        if (size > (PAGE_SIZE << compound_order(qpg)))
                return -EINVAL;

        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
        .name =         "XDP",
        .owner =        THIS_MODULE,
        .obj_size =     sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
        .family         = PF_XDP,
        .owner          = THIS_MODULE,
        .release        = xsk_release,
        .bind           = xsk_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .poll_mask      = xsk_poll_mask,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = xsk_setsockopt,
        .getsockopt     = xsk_getsockopt,
        .sendmsg        = xsk_sendmsg,
        .recvmsg        = sock_no_recvmsg,
        .mmap           = xsk_mmap,
        .sendpage       = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (!sock_flag(sk, SOCK_DEAD))
                return;

        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);
        xdp_del_sk_umem(xs->umem, xs);
        xdp_put_umem(xs->umem);

        sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;
        struct xdp_sock *xs;

        if (!ns_capable(net->user_ns, CAP_NET_RAW))
                return -EPERM;
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        if (protocol)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
        if (!sk)
                return -ENOBUFS;

        sock->ops = &xsk_proto_ops;

        sock_init_data(sock, sk);

        sk->sk_family = PF_XDP;

        sk->sk_destruct = xsk_destruct;
        sk_refcnt_debug_inc(sk);

        xs = xdp_sk(sk);
        mutex_init(&xs->mutex);

        local_bh_disable();
        sock_prot_inuse_add(net, &xsk_proto, 1);
        local_bh_enable();

        return 0;
}

static const struct net_proto_family xsk_family_ops = {
        .family = PF_XDP,
        .create = xsk_create,
        .owner  = THIS_MODULE,
};

static int __init xsk_init(void)
{
        int err;

        err = proto_register(&xsk_proto, 0 /* no slab */);
        if (err)
                goto out;

        err = sock_register(&xsk_family_ops);
        if (err)
                goto out_proto;

        return 0;

out_proto:
        proto_unregister(&xsk_proto);
out:
        return err;
}

fs_initcall(xsk_init);