/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

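/*
 * alloc->buffers is a list of all buffers (free and allocated) in address
 * order; these helpers return the buffers adjacent to @buffer in that list.
 */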
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

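/*
 * A buffer's size is not stored explicitly: it is the gap between its data
 * pointer and the data pointer of the next buffer in the address-ordered
 * list (or the end of the mmap'ed area for the last buffer).
 */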
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

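/*
 * Free buffers are kept in an rb-tree keyed by size so allocation can do a
 * best-fit search; buffers of equal size are pushed to the right subtree.
 */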
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

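/*
 * Allocated (in-use) buffers are kept in a second rb-tree, keyed by the
 * kernel address of their data, so a buffer can be looked up from a user
 * pointer.
 */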
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

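/*
 * Translate the user pointer back to a kernel data pointer (the two
 * mappings differ by the constant user_buffer_offset), look it up in the
 * allocated-buffers tree, and reject a second free of the same buffer.
 */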
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

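/*
 * Allocate (allocate == 1) or release (allocate == 0) the physical pages
 * backing the kernel range [start, end). Each page is mapped at both its
 * kernel address and the corresponding user address in the binder vma.
 */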
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
			       alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

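	/*
	 * free_range also serves as the error-unwind path for the
	 * allocation loop above: jumping to one of the labels below tears
	 * the failing page down only as far as it was set up, and the loop
	 * then walks backwards releasing every fully-mapped page.
	 */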
free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

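/*
 * Carve a buffer of the requested (pointer-aligned) total size out of the
 * best-fitting free buffer and back it with pages as needed. Expects
 * alloc->mutex to be held; see binder_alloc_new_buf().
 */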
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

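	/*
	 * Best-fit search over the size-sorted free tree: if the loop ends
	 * with n == NULL, no exact match was found and best_fit points at
	 * the smallest free buffer that is still large enough.
	 */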
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

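	/*
	 * If the chosen free buffer is larger than we need, carve the
	 * remainder into a new binder_buffer so it can go back on the
	 * free tree.
	 */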
	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr, NULL);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

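/*
 * Page-granularity helpers: buffer_start_page() is the page containing
 * the first byte of a buffer's data, prev_buffer_end_page() the page
 * containing the byte immediately before it.
 */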
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

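/*
 * Unlink a free buffer that is being merged into a neighbouring free
 * buffer and free its struct. The page containing the buffer's start is
 * released only when neither the previous nor the next buffer shares it
 * and the start is not page aligned.
 */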
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE,
					 NULL);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

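/*
 * Return a buffer to the free state: release its fully-contained pages,
 * move it to the free tree, and coalesce it with a free neighbour on
 * either side.
 */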
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
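	/*
	 * user_buffer_offset is the fixed delta between the kernel VA
	 * range reserved above and the user-space mapping in @vma; adding
	 * it to a kernel buffer address gives the matching user address,
	 * and subtracting it converts a user pointer back.
	 */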
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

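/*
 * Release everything owned by @alloc: any buffers still allocated, the
 * remaining buffer structs, all backing pages, the page array, and the
 * kernel VA range.
 */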
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK not freed\n",
					   __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}