// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>

#include "xdp_umem.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

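/* Undo xdp_umem_pin_pages(): mark every pinned page dirty, drop the
 * reference taken by get_user_pages() and free the page array.
 */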
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	/* accounting is skipped (user stays NULL) for CAP_IPC_LOCK tasks */
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

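/* Tear down a umem: destroy the fill and completion queues, unpin the
 * user pages and, if the owning task and its mm still exist, return the
 * pages to the user's locked_vm accounting before freeing the umem.
 */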
static void xdp_umem_release(struct xdp_umem *umem)
{
	struct task_struct *task;
	struct mm_struct *mm;

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xdp_umem_unpin_pages(umem);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	mmput(mm);
	xdp_umem_unaccount_pages(umem);
out:
	kfree(umem);
}

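/* Work callback used by xdp_put_umem(); running the release from a
 * workqueue keeps the blocking teardown out of the context that drops
 * the last reference.
 */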
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

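/* Pin the user memory area with get_user_pages() so the pages stay
 * resident while the umem is in use. On a partial pin the already
 * pinned pages are released and -ENOMEM is returned.
 */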
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

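/* Charge the pinned pages against the owner's RLIMIT_MEMLOCK. The
 * cmpxchg loop updates user->locked_vm without taking a lock; tasks
 * with CAP_IPC_LOCK skip the accounting entirely.
 */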
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

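/* Validate a registration request and set up the umem: the chunk size
 * must be a power of two in [XDP_UMEM_MIN_CHUNK_SIZE, PAGE_SIZE], the
 * area must be page aligned, and the chunks must evenly fill whole
 * pages. User space registers the area roughly like this (sketch of
 * the AF_XDP setsockopt call, where xsk_fd is an AF_XDP socket and
 * buf/buf_len are placeholders; error handling omitted):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(unsigned long)buf,	// page-aligned buffer
 *		.len = buf_len,				// multiple of PAGE_SIZE
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */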
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, we keep this restriction; it might
		 * be relaxed later.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->address = (unsigned long)addr;
	umem->props.chunk_mask = ~((u64)chunk_size - 1);
	umem->props.size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;
	return 0;

out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}

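/* Allocate a umem and register the user memory described by @mr.
 * Returns the new umem on success or an ERR_PTR() on failure.
 */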
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

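/* The umem is only considered complete once both the fill queue and
 * the completion queue have been created.
 */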
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}
Magnus Karlsson965a9902018-05-02 13:01:26 +0200245}