/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

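/*
 * binder_alloc_buffer_size() - usable size of @buffer.
 *
 * Buffers are carved back-to-back out of the mmap'ed region, so the
 * usable size is the distance from this buffer's data to the start of
 * the next buffer in alloc->buffers (or to the end of the region for
 * the last buffer).
 */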
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer +
		       alloc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}

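/*
 * Insert @new_buffer into the size-ordered rbtree of free buffers so
 * that binder_alloc_new_buf_locked() can do a best-fit lookup.
 */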
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

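/*
 * Insert @new_buffer into the address-ordered rbtree of in-use buffers;
 * this is the tree binder_alloc_prepare_to_free_locked() searches when
 * userspace hands a buffer back.
 */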
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

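/*
 * Translate a userspace buffer pointer back to its kernel
 * struct binder_buffer: the user address points at the buffer's data[]
 * area, so subtract alloc->user_buffer_offset and the data offset to
 * recover the header, then look it up in the allocated tree and mark it
 * free_in_progress to reject double frees.
 */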
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL if no match is found
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

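/*
 * Allocate (@allocate != 0) or free (@allocate == 0) the physical pages
 * backing the kernel range [start, end).  On allocation each page is
 * mapped into the kernel area and inserted into the task's vma at the
 * mirrored user address; on free the mappings are torn down in reverse.
 * The error labels fall through the free loop so a partially set up
 * page is unwound correctly.
 */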
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
			       alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

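/*
 * Best-fit allocator: walk the size-ordered free tree for the smallest
 * free buffer that fits the (pointer-aligned) data + offsets + extra
 * sizes, back it with pages, and if the chosen buffer is larger than
 * needed split the tail off as a new free buffer.  Async allocations
 * are additionally charged against alloc->free_async_space.
 */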
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

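/*
 * First and last pages spanned by the binder_buffer header itself;
 * used below to decide which pages a free buffer still shares with its
 * neighbours and which can be released.
 */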
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

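/*
 * Unlink a free buffer that is being merged away and release the pages
 * it occupied, except for any page still shared with the previous or
 * next buffer in the list (tracked by free_page_start/free_page_end).
 */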
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
				   alloc->pid, buffer, free_page_start ? "" : " end",
				   free_page_end ? "" : " start", prev, next);
		binder_update_page_range(alloc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

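/*
 * Return @buffer to the free state: release the pages that belong to it
 * alone, move it from the allocated tree back to the free tree, and
 * coalesce with a free neighbour on either side so adjacent free space
 * stays in a single buffer.
 */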
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < alloc->buffer);
	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	if (binder_update_page_range(alloc, 1, alloc->buffer,
				     alloc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_small_buf_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}


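/*
 * Called once the proc is dead and its mapping is gone (alloc->vma must
 * already be cleared): free every still-allocated buffer, release any
 * pages that remain, then drop the page array and the kernel area.
 */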
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK not freed\n",
					   __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

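/*
 * Emit one seq_file line describing @buffer; used by
 * binder_alloc_print_allocated() below.
 */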
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}