/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;
	int ret;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since the CPU will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
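	/* Worked example (illustrative numbers only): with max_entries == 5,
	 * fls_long(4) == 3, so mask64 == (1ULL << 3) - 1 == 7 and an
	 * unprivileged array is rounded up to 8 slots; any speculated index
	 * is masked into [0, 7] and cannot run past the allocation.
	 */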

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu &&
	    (elem_size > PCPU_MIN_UNIT_SIZE ||
	     bpf_array_alloc_percpu(array))) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
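
/* Illustrative user space sketch (identifiers and sizes below are examples,
 * not taken from this file): an array map of this type is normally created
 * through the bpf(2) syscall with the BPF_MAP_CREATE command, roughly:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = 64,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * The sanity checks above reject key_size != 4, value_size == 0 and any
 * non-zero map_flags.
 */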

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
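
/* Minimal, illustrative eBPF-side sketch (the map name is an example): this
 * lookup is reached through the bpf_map_lookup_elem() helper, and the NULL
 * check is mandatory since out-of-range indices return NULL here:
 *
 *	__u32 key = 0;
 *	long *value = bpf_map_lookup_elem(&my_array, &key);
 *
 *	if (value)
 *		(*value)++;
 */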

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
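
/* Note on the user space buffer (illustrative sizing, actual values depend
 * on the running system): a syscall-side lookup of a per-cpu array lands in
 * bpf_percpu_array_copy() above, so the destination buffer must hold
 * round_up(value_size, 8) * num_possible_cpus() bytes, one rounded-up value
 * per possible CPU concatenated in CPU order; e.g. with value_size == 12,
 * each per-CPU slot occupies 16 bytes.
 */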

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
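
/* Illustrative user space iteration (identifiers are examples, using a
 * library wrapper such as bpf_map_get_next_key() from tools/lib/bpf around
 * the BPF_MAP_GET_NEXT_KEY command): start from an out-of-range key to get
 * index 0 back, then walk forward until -ENOENT is returned for the last
 * index:
 *
 *	__u32 key = (__u32)-1, next_key;
 *
 *	while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0)
 *		key = next_key;
 */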

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned, or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leak is possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding programs to
	 * complete and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __read_mostly = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
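
/* Illustrative user space sketch (index and fd names are examples): the
 * value written into an fd array is itself a file descriptor, e.g. storing
 * a loaded program into slot 5 of a BPF_MAP_TYPE_PROG_ARRAY goes through
 * the regular BPF_MAP_UPDATE_ELEM command with map_flags == BPF_ANY, the
 * only flag value accepted above:
 *
 *	__u32 key = 5;
 *	__u32 value = prog_fd;	(file descriptor returned by BPF_PROG_LOAD)
 *
 * the previous entry, if any, is released via map_fd_put_ptr().
 */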

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);
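
/* A prog array is consumed from eBPF programs via the bpf_tail_call()
 * helper; a minimal, illustrative snippet (map name and index are examples):
 *
 *	bpf_tail_call(ctx, &jmp_table, idx);
 *
 * on success the tail call does not return and execution continues in the
 * target program; if jmp_table[idx] is empty or incompatible, execution
 * simply falls through to the next instruction.
 */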

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);
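
/* Typical (illustrative) flow for a perf event array, with the map and
 * variable names below being examples: user space opens events with
 * perf_event_open(2) and stores one event fd per CPU into the map; a BPF
 * program can then emit samples through the bpf_perf_event_output() helper,
 * roughly:
 *
 *	bpf_perf_event_output(ctx, &events, cpu, &data, sizeof(data));
 *
 * entries installed through a given map file are dropped again in
 * perf_event_fd_array_release() when that file goes away.
 */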

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static const struct bpf_map_ops cgroup_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __read_mostly = {
	.ops = &cgroup_array_ops,
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
	bpf_register_map_type(&cgroup_array_type);
	return 0;
}
late_initcall(register_cgroup_array_map);
#endif