/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

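/*
 * Debug classes that can be enabled at runtime through the debug_mask
 * module parameter below; each bit enables one class of pr_info()
 * output from binder_alloc_debug().
 */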
enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

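/*
 * Return the usable size of @buffer: the gap between the start of its
 * data area and the start of the next buffer in the address-ordered
 * list (or the end of the mapping if @buffer is the last entry).
 */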
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer +
		       alloc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
				  struct binder_buffer, entry) - (size_t)buffer->data;
}

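/*
 * Insert @new_buffer into the free-buffers rbtree, which is keyed by
 * buffer size so that allocation can do a best-fit search.
 */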
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

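/*
 * Insert @new_buffer into the allocated-buffers rbtree, which is keyed
 * by buffer address.  Must be called with alloc->mutex held.
 */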
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

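/*
 * Translate a userspace data pointer back to its kernel binder_buffer
 * (by undoing user_buffer_offset and the data[] member offset) and look
 * it up in the allocated-buffers rbtree.  Must be called with
 * alloc->mutex held.
 */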
static struct binder_buffer *binder_alloc_buffer_lookup_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/**
 * binder_alloc_buffer_lookup() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer by searching the allocated-buffers
 * rb tree.
 *
 * Return:	Pointer to the buffer, or NULL if no match is found
 */
struct binder_buffer *binder_alloc_buffer_lookup(struct binder_alloc *alloc,
						 uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_buffer_lookup_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

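/*
 * Allocate (@allocate != 0) or free (@allocate == 0) the physical pages
 * backing the kernel range [@start, @end): pages are mapped into the
 * kernel vmalloc area and inserted into the userspace @vma (or zapped
 * from it when freeing).  Returns 0 on success or a negative errno.
 */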
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

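/*
 * Carve a buffer of the requested total size out of the free-buffers
 * rbtree: perform a best-fit search, back the chosen range with pages
 * via binder_update_page_range(), and split off any remainder as a new
 * free buffer.  Must be called with alloc->mutex held.  Returns the
 * buffer or an ERR_PTR-encoded errno.
 */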
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}
	return buffer;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       size of the user-specified buffer offsets array
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

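/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * since errors are reported through ERR_PTR() rather than NULL, users of
 * this API are expected to do something along these lines and release
 * the buffer with binder_alloc_free_buf() once it has been consumed:
 *
 *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				      extra_buffers_size, is_async);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	...
 *	binder_alloc_free_buf(alloc, buffer);
 */

/*
 * buffer_start_page()/buffer_end_page() return the page-aligned address
 * of the page holding the first and last byte of the binder_buffer
 * header, respectively; they are used to decide whether a freed buffer
 * shares pages with its neighbours.
 */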
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

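/*
 * Remove a free buffer from the address-ordered list and release any
 * pages it occupies that are not shared with the neighbouring buffers.
 */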
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK share page with %pK\n",
			      alloc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %pK share page with %pK\n",
				      alloc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
			     alloc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(alloc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

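/*
 * Return @buffer to the free state: release the pages backing its data
 * area, move it from the allocated-buffers rbtree to the free-buffers
 * rbtree, and coalesce it with adjacent free buffers.  Must be called
 * with alloc->mutex held.
 */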
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < alloc->buffer);
	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	if (binder_update_page_range(alloc, 1, alloc->buffer,
				     alloc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_small_buf_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}


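/*
 * Tear down the allocator when the binder proc is being destroyed: free
 * every remaining allocated buffer, release all backing pages, and free
 * the page array and the kernel address range.  The userspace mapping
 * must already be gone (alloc->vma == NULL).
 */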
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK not freed\n",
				     __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

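/*
 * Emit a one-line description of @buffer to the seq_file, prefixed with
 * @prefix; helper for binder_alloc_print_allocated().
 */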
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}