// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

static struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}

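/* A socket is ready for insertion into an XSKMAP only when its rx ring,
 * umem and fill queue have all been set up.
 */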
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

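/* Copy-mode receive: reserve a frame from the fill queue, copy the packet
 * (preceded by any XDP metadata) into umem and publish a descriptor on the
 * rx ring. The xdp_buff is handed back to the driver on success.
 */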
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *to_buf, *from_buf;
	u32 metalen;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	to_buf = xdp_umem_get_data(xs->umem, addr);
	memcpy(to_buf, from_buf, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

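/* Zero-copy receive: the frame already lives in umem, so it is enough to
 * publish a descriptor referencing the buffer's umem handle.
 */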
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

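/* Entry point for packets redirected to this socket by an XDP program.
 * Dispatches on the memory model of the receive queue: zero-copy if the
 * driver placed the frame in umem, otherwise the copy path.
 */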
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

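/* Make any published rx descriptors visible to userspace and wake up
 * readers blocked on the socket.
 */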
void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

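/* Receive path for generic (skb-mode) XDP. Same scheme as __xsk_rcv(),
 * but with no batching, so the rx ring is flushed for every packet.
 */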
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

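/* Driver-facing API: report nb_entries completed transmissions by making
 * their addresses visible on the completion ring.
 */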
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

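/* Driver-facing API: fetch the next TX descriptor from any socket sharing
 * this umem. A completion ring entry is reserved lazily so that the address
 * can always be posted once the hardware is done with the frame.
 */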
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

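/* Zero-copy transmit only kicks the driver; the descriptors are consumed
 * asynchronously through xsk_umem_consume_tx().
 */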
static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

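/* skb destructor for the copy-mode TX path: post the frame's address on the
 * completion ring. May run in hard interrupt context, hence the irqsave
 * variant of the completion lock.
 */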
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

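/* Copy-mode transmit: drain up to TX_BATCH_SIZE descriptors from the tx
 * ring, copy each frame into a freshly allocated skb and send it directly
 * to the bound queue of the device.
 */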
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		if (xskq_reserve_addr(xs->umem->cq))
			goto out;

		if (xs->queue_id >= xs->dev->real_num_tx_queues)
			goto out;

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

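/* The socket is readable when the rx ring holds unconsumed descriptors and
 * writable as long as the tx ring is not full.
 */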
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

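/* A ring can be created only once, and its size must be a power of two
 * since the queue code masks ring indices rather than range-checking them.
 */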
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	*queue = q;
	return 0;
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	if (xs->dev) {
		struct net_device *dev = xs->dev;

		/* Wait for driver to stop using the xdp socket. */
		xdp_del_sk_umem(xs->umem, xs);
		xs->dev = NULL;
		synchronize_net();
		dev_put(dev);
	}

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

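/* Bind the socket to a device and queue id. With XDP_SHARED_UMEM the umem
 * is inherited from an already bound socket on the same device and queue;
 * otherwise the socket's own umem is assigned to the device, which may
 * enable zero-copy.
 */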
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;
	flags = sxdp->sxdp_flags;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc = offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

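/* Map one of the four rings into userspace. The page offset encodes which
 * ring (rx, tx, fill or completion) is requested.
 */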
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

770static const struct net_proto_family xsk_family_ops = {
771 .family = PF_XDP,
772 .create = xsk_create,
773 .owner = THIS_MODULE,
774};
775
776static int __init xsk_init(void)
777{
778 int err;
779
780 err = proto_register(&xsk_proto, 0 /* no slab */);
781 if (err)
782 goto out;
783
784 err = sock_register(&xsk_family_ops);
785 if (err)
786 goto out_proto;
787
788 return 0;
789
790out_proto:
791 proto_unregister(&xsk_proto);
792out:
793 return err;
794}
795
796fs_initcall(xsk_init);