/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
        }

        return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        u32 elem_size, index_mask, max_entries;
        bool unpriv = !capable(CAP_SYS_ADMIN);
        struct bpf_array *array;
        u64 array_size, mask64;

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 || attr->map_flags)
                return ERR_PTR(-EINVAL);

        if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return ERR_PTR(-E2BIG);

        elem_size = round_up(attr->value_size, 8);

        max_entries = attr->max_entries;

        /* On 32-bit archs, roundup_pow_of_two() with a max_entries that has
         * the uppermost bit set in u32 space is undefined behavior due to
         * the resulting 1U << 32, so do it manually here in u64 space.
         */
        mask64 = fls_long(max_entries - 1);
        mask64 = 1ULL << mask64;
        mask64 -= 1;

        index_mask = mask64;
        if (unpriv) {
                /* round up array size to nearest power of 2,
                 * since cpu will speculate within index_mask limits
                 */
                max_entries = index_mask + 1;
                /* Check for overflows. */
                if (max_entries < attr->max_entries)
                        return ERR_PTR(-E2BIG);
        }

        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) max_entries * sizeof(void *);
        else
                array_size += (u64) max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size);
        if (!array)
                return ERR_PTR(-ENOMEM);
        array->index_mask = index_mask;
        array->map.unpriv_array = unpriv;

        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
        array->elem_size = elem_size;

        if (!percpu)
                goto out;

        array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

        if (array_size >= U32_MAX - PAGE_SIZE ||
            elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
out:
        array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

        return &array->map;
}
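
/* Usage sketch (illustrative, not part of this file): the sanity checks
 * above are what userspace hits when it creates an array map through the
 * bpf(2) syscall. A minimal, hypothetical caller, assuming <linux/bpf.h>,
 * <sys/syscall.h> and <unistd.h> are included and with error handling
 * elided:
 *
 *      union bpf_attr attr = {
 *              .map_type    = BPF_MAP_TYPE_ARRAY,
 *              .key_size    = 4,
 *              .value_size  = 8,
 *              .max_entries = 256,
 *      };
 *      int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * key_size must be exactly 4 and map_flags zero, or the checks above
 * return -EINVAL; a max_entries of 256 is already a power of 2, so the
 * unprivileged rounding leaves it unchanged.
 */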

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * (index & array->index_mask);
}
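
/* Usage sketch (illustrative): from an eBPF program this lookup is
 * reached through the BPF_FUNC_map_lookup_elem helper. A minimal,
 * hypothetical per-event counter in restricted C, where "counters" is
 * an array map set up by the loader:
 *
 *      u32 key = 0;
 *      u64 *val = bpf_map_lookup_elem(&counters, &key);
 *
 *      if (val)
 *              __sync_fetch_and_add(val, 1);
 *
 * The NULL check is mandatory: the verifier rejects programs that
 * dereference the result without testing it, since out-of-range
 * indices make this function return NULL.
 */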

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}
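
/* Usage sketch (illustrative): this copy runs when userspace does
 * BPF_MAP_LOOKUP_ELEM on a percpu array, so the value buffer must hold
 * round_up(value_size, 8) bytes for every possible CPU. A hypothetical
 * caller with value_size == 8, where NCPUS covers num_possible_cpus():
 *
 *      __u32 key = 0;
 *      __u64 vals[NCPUS];
 *      union bpf_attr attr = {
 *              .map_fd = map_fd,
 *              .key    = (__u64)(unsigned long)&key,
 *              .value  = (__u64)(unsigned long)vals,
 *      };
 *
 *      err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *
 * On success vals[i] holds the element's value on possible CPU i.
 */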

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}
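
/* Usage sketch (illustrative): the semantics above let userspace walk
 * the whole array: any out-of-range start key yields index 0, and the
 * last index returns -ENOENT, which ends the loop. A hypothetical
 * caller:
 *
 *      __u32 key = -1, next;
 *      union bpf_attr attr = {
 *              .map_fd   = map_fd,
 *              .key      = (__u64)(unsigned long)&key,
 *              .next_key = (__u64)(unsigned long)&next,
 *      };
 *
 *      while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)))
 *              key = next;
 */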

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        else
                memcpy(array->value +
                       array->elem_size * (index & array->index_mask),
                       value, map->value_size);
        return 0;
}
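
/* Usage sketch (illustrative): because every element of an array map
 * always exists, BPF_NOEXIST can never succeed here and BPF_ANY is
 * equivalent to BPF_EXIST. A hypothetical userspace update:
 *
 *      __u32 key = 3;
 *      __u64 val = 42;
 *      union bpf_attr attr = {
 *              .map_fd = map_fd,
 *              .key    = (__u64)(unsigned long)&key,
 *              .value  = (__u64)(unsigned long)&val,
 *              .flags  = BPF_ANY,
 *      };
 *
 *      err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */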

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* user space provides round_up(value_size, 8) bytes that are
         * copied into the per-cpu area. bpf programs can only access
         * value_size of it. During lookup the same extra bytes are
         * returned, or zeros that percpu_alloc zero-filled, so no
         * kernel data can leak
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (there can be more than one using this map) have
         * been disconnected from events. Wait for outstanding programs to
         * complete and free the array
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

static const struct bpf_map_ops array_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
        .ops = &array_ops,
        .type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __read_mostly = {
        .ops = &percpu_array_ops,
        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
        bpf_register_map_type(&array_type);
        bpf_register_map_type(&percpu_array_type);
        return 0;
}
late_initcall(register_array_map);

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
        return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
        .ops = &prog_array_ops,
        .type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
        bpf_register_map_type(&prog_array_type);
        return 0;
}
late_initcall(register_prog_array_map);
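
/* Usage sketch (illustrative): prog arrays exist to service the
 * bpf_tail_call() helper. A hypothetical dispatcher in restricted C,
 * where "jmp_table" is a BPF_MAP_TYPE_PROG_ARRAY whose slots userspace
 * filled with program fds (via bpf_fd_array_map_update_elem() above):
 *
 *      bpf_tail_call(ctx, &jmp_table, index);
 *
 *      return 0;
 *
 * On success bpf_tail_call() does not return; control transfers to the
 * program stored at jmp_table[index]. Falling through to "return 0"
 * means the slot was empty, the index was out of range, or the
 * tail-call limit was exceeded.
 */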

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        const struct perf_event_attr *attr;
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        event = perf_file->private_data;
        ee = ERR_PTR(-EINVAL);

        attr = perf_event_attrs(event);
        if (IS_ERR(attr) || attr->inherit)
                goto err_out;

        switch (attr->type) {
        case PERF_TYPE_SOFTWARE:
                if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
                        goto err_out;
                /* fall-through */
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
                ee = bpf_event_entry_gen(perf_file, map_file);
                if (ee)
                        return ee;
                ee = ERR_PTR(-ENOMEM);
                /* fall-through */
        default:
                break;
        }

err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

static const struct bpf_map_ops perf_event_array_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
        .ops = &perf_event_array_ops,
        .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
        bpf_register_map_type(&perf_event_array_type);
        return 0;
}
late_initcall(register_perf_event_array_map);
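
/* Usage sketch (illustrative): perf event arrays are consumed through
 * the bpf_perf_event_output() helper, which picks the bpf_event_entry
 * installed above. A hypothetical restricted-C fragment, where "events"
 * is a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots userspace filled with
 * perf event fds, and struct event_t is the program's own record type:
 *
 *      struct event_t data = { .pid = bpf_get_current_pid_tgid() };
 *
 *      bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                            &data, sizeof(data));
 */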

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees the cgroup after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static const struct bpf_map_ops cgroup_array_ops = {
        .map_alloc = fd_array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __read_mostly = {
        .ops = &cgroup_array_ops,
        .type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
        bpf_register_map_type(&cgroup_array_type);
        return 0;
}
late_initcall(register_cgroup_array_map);
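
/* Usage sketch (illustrative): cgroup arrays back the
 * bpf_skb_under_cgroup() helper. A hypothetical socket filter fragment,
 * where "cgroups" is a BPF_MAP_TYPE_CGROUP_ARRAY whose slot 0 userspace
 * filled with a cgroup directory fd:
 *
 *      int ret = bpf_skb_under_cgroup(skb, &cgroups, 0);
 *
 * ret is 1 if the skb's socket belongs to that cgroup, 0 if it does
 * not, and negative on error (e.g. an empty slot).
 */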
#endif