/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

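/*
 * Free buffers are kept in alloc->free_buffers, an rbtree ordered by
 * buffer size.  A buffer's size is not stored explicitly; it is the gap
 * between its user_data and the next buffer's user_data (or the end of
 * the mapping for the last buffer), as computed by
 * binder_alloc_buffer_size() above.  Keeping the tree sorted by size is
 * what lets binder_alloc_new_buf_locked() do a best-fit search.
 */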
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

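/*
 * In-use buffers live in alloc->allocated_buffers, an rbtree keyed by the
 * buffer's userspace address (user_data).  This is the tree that
 * binder_alloc_prepare_to_free_locked() walks to translate the pointer a
 * process hands back (e.g. with BC_FREE_BUFFER) into a struct binder_buffer.
 */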
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to buffer, NULL if no buffer matches @user_ptr, or
 * ERR_PTR(-EPERM) if the buffer is not currently freeable by userspace
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

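/*
 * binder_update_page_range() either populates or releases the physical
 * pages backing [start, end) of the buffer mapping:
 *
 *   allocate == 1: make sure every page in the range has a struct page,
 *	pulling it back off the binder LRU if it is still cached there, or
 *	allocating a fresh zeroed page and inserting it into the task's vma
 *	with vm_insert_page().
 *   allocate == 0: push every page in the range onto the binder LRU so the
 *	shrinker (binder_alloc_free_page()) can reclaim it later; pages are
 *	not freed here.
 *
 * Returns 0 when the allocate path succeeds; failures are reported as
 * -ENOMEM or -ESRCH (callers ignore the return value on the free path).
 */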
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

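/*
 * Best-fit allocator.  Walk the size-ordered free_buffers tree for the
 * smallest free buffer that can hold the (pointer-aligned) data, offsets
 * and extra-buffers areas.  If that buffer is larger than needed, a new
 * binder_buffer struct is carved out at buffer->user_data + size and
 * reinserted as free, so one free chunk is split into an allocated part
 * and a smaller free remainder.  Physical pages are only populated for
 * the page range the new allocation actually touches.
 */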
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

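/*
 * Illustrative only (not taken from binder.c verbatim): callers must treat
 * the return value of binder_alloc_new_buf() as an ERR_PTR, e.g.
 *
 *	buffer = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				      secctx_sz, is_async);
 *	if (IS_ERR(buffer)) {
 *		ret = PTR_ERR(buffer);	/- -ESRCH, -EINVAL, -ENOSPC or -ENOMEM
 *		buffer = NULL;
 *		... fail the transaction ...
 *	}
 *
 * The two helpers below report which page a buffer starts on and which
 * page the byte just before it lives on; binder_delete_free_buffer() uses
 * them to decide whether a dead buffer shares pages with its neighbours.
 */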
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

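/*
 * Return a buffer to the free state: give its page range back to the LRU,
 * move it from allocated_buffers to free_buffers, and coalesce it with a
 * free neighbour on either side so adjacent free space stays in one chunk
 * (binder_delete_free_buffer() drops the redundant binder_buffer struct).
 */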
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
		(void __user *)(((uintptr_t)
			  buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	alloc->buffer = (void __user *)vma->vm_start;
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;
	/* Same as mmgrab() in later kernel versions */
	atomic_inc(&alloc->vma_vm_mm->mm_count);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer = NULL;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}


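/*
 * Tear-down path, called once the binder file and its mapping are gone
 * (alloc->vma must already be NULL).  Any buffers still in the allocated
 * tree are force-freed, the buffer list and page array are released, and
 * the mm reference taken at mmap time is dropped.
 */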
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK %s\n",
				     __func__, alloc->pid, i, page_addr,
				     on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru that contains @item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = alloc->vma;
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

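/*
 * Shrinker glue: the count callback reports how many binder pages are
 * sitting on the global LRU, and the scan callback walks that LRU with
 * binder_alloc_free_page() to actually unmap and free them under memory
 * pressure.
 */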
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

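/*
 * Kernel-side copies in and out of a binder buffer go page by page:
 * binder_alloc_get_page() translates the buffer offset into the backing
 * struct page plus an offset within it, the page is mapped with
 * kmap_atomic(), and at most (PAGE_SIZE - pgoff) bytes are memcpy'd per
 * iteration.  The to_buffer flag selects the copy direction.
 */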
static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
					bool to_buffer,
					struct binder_buffer *buffer,
					binder_size_t buffer_offset,
					void *ptr,
					size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
}

void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 void *src,
				 size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
				    src, bytes);
}

void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				   void *dest,
				   struct binder_buffer *buffer,
				   binder_size_t buffer_offset,
				   size_t bytes)
{
	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
				    dest, bytes);
}

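/*
 * Illustrative sketch (not part of this file): a caller in binder.c that
 * wants to patch one object inside a transaction buffer would typically
 * pair the two helpers above, e.g.
 *
 *	struct flat_binder_object fbo;
 *
 *	binder_alloc_copy_from_buffer(alloc, &fbo, buffer, object_offset,
 *				      sizeof(fbo));
 *	fbo.handle = new_handle;	/- hypothetical fixup
 *	binder_alloc_copy_to_buffer(alloc, buffer, object_offset, &fbo,
 *				    sizeof(fbo));
 *
 * object_offset must be u32-aligned and inside the buffer, or the
 * BUG_ON(!check_buffer(...)) at the top of binder_alloc_do_buffer_copy()
 * will trigger.
 */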