/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

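/* Creation-time flags accepted for array maps: BPF_F_RDONLY and
 * BPF_F_WRONLY restrict how the map may be accessed from user space;
 * any other flag bits are rejected in array_map_alloc().
 */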
#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_RDONLY | BPF_F_WRONLY)

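/* Free the per-cpu value regions of a BPF_MAP_TYPE_PERCPU_ARRAY.
 * Safe on a partially constructed array: the struct is zero-filled
 * at allocation time and free_percpu(NULL) is a no-op.
 */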
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

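/* Allocate one zero-filled, 8-byte aligned per-cpu region per element.
 * On failure everything allocated so far is torn down again.
 */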
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
		/* if value_size is bigger, user space won't be able to
		 * access the elements
		 */
		return ERR_PTR(-E2BIG);

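	/* Round the value size up to 8 bytes so that every element is
	 * u64 aligned; the per-cpu allocation above and bpf_long_memcpy()
	 * rely on this alignment.
	 */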
	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}

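/* Called from syscall: copy the value for @key from every possible CPU
 * into one flat buffer. The caller supplies num_possible_cpus() *
 * round_up(value_size, 8) bytes, see bpf_percpu_array_update() below.
 */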
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}

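/* Called from syscall: unlike array_map_update_elem(), which writes only
 * the current CPU's copy of a per-cpu element, this writes the new value
 * into the slot of every possible CPU.
 */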
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space provides round_up(value_size, 8) bytes that are
	 * copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned, or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks are possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program; always fails, since
 * elements of an array cannot be deleted, only overwritten
 */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __read_mostly = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);

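/* fd arrays: BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY and
 * BPF_MAP_TYPE_CGROUP_ARRAY reuse the plain array layout, but each u32
 * value written from user space is a file descriptor that is translated
 * into a kernel object pointer by the map's map_fd_get_ptr() callback.
 */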
static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

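/* The map must already have been emptied, e.g. via bpf_fd_array_map_clear()
 * or the type's map_release hook; a pointer left behind here would leak a
 * reference to the underlying kernel object.
 */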
static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

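/* fd array elements hold kernel pointers, so lookups from user space or
 * from an eBPF program always fail; the stored objects are reachable only
 * through type-specific helpers such as bpf_tail_call().
 */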
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

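	/* xchg() swaps the slot atomically, so concurrent readers see either
	 * the old or the new pointer, never a torn value; the reference on
	 * the displaced object is dropped afterwards
	 */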
	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

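/* Translate a program fd into a bpf_prog reference and make sure the
 * program's type is compatible with the programs already stored in this
 * prog_array.
 */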
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);

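/* A perf event array element pins the perf event's struct file and also
 * records which map file installed it, so that perf_event_fd_array_release()
 * can drop exactly the entries created through that map fd.
 */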
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

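/* Validate the perf event before storing it: inherited events are
 * rejected, and only hardware/raw counters and software events of type
 * PERF_COUNT_SW_BPF_OUTPUT are accepted.
 */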
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

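/* Called when a map file that entries were installed through is released:
 * drop only the entries whose map_file matches, leaving entries installed
 * via other map fds in place.
 */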
static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);


#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static const struct bpf_map_ops cgroup_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __read_mostly = {
	.ops = &cgroup_array_ops,
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
	bpf_register_map_type(&cgroup_array_type);
	return 0;
}
late_initcall(register_cgroup_array_map);
#endif