blob: 59cb6d9ddbc0c1d22c31e9e73f712b9ab6698c7e [file] [log] [blame]
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Anmol Sarma56b468f2012-10-30 22:35:43 +053018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090020#include <asm/cacheflush.h>
21#include <linux/fdtable.h>
22#include <linux/file.h>
Colin Crosse2610b22013-05-06 23:50:15 +000023#include <linux/freezer.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090024#include <linux/fs.h>
25#include <linux/list.h>
26#include <linux/miscdevice.h>
27#include <linux/mm.h>
28#include <linux/module.h>
29#include <linux/mutex.h>
30#include <linux/nsproxy.h>
31#include <linux/poll.h>
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070032#include <linux/debugfs.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090033#include <linux/rbtree.h>
34#include <linux/sched.h>
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070035#include <linux/seq_file.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090036#include <linux/uaccess.h>
37#include <linux/vmalloc.h>
Colin Crossc11a1662010-04-15 15:21:51 -070038#include <linux/slab.h>
Eric W. Biederman17cf22c2010-03-02 14:51:53 -080039#include <linux/pid_namespace.h>
Stephen Smalley79af7302015-01-21 10:54:10 -050040#include <linux/security.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090041
Greg Kroah-Hartman9246a4a2014-10-16 15:26:51 +020042#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
43#define BINDER_IPC_32BIT 1
44#endif
45
46#include <uapi/linux/android/binder.h>
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -070047#include "binder_trace.h"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090048
/* Single coarse lock serializing driver state (see binder_lock()). */
static DEFINE_MUTEX(binder_main_lock);
/* Guards the deferred-work machinery (binder_deferred_list below). */
static DEFINE_MUTEX(binder_deferred_lock);
/* Serializes binder_mmap() setup; see users elsewhere in this file. */
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);		/* all binder_proc instances */
static HLIST_HEAD(binder_deferred_list);	/* procs with deferred work pending */
static HLIST_HEAD(binder_dead_nodes);		/* nodes that outlived their proc */

/* debugfs roots created at init; per-proc files hang off _proc. */
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
/* Monotonically increasing debug id handed to new nodes (see binder_new_node). */
static int binder_last_id;
60
/*
 * BINDER_DEBUG_ENTRY(name) - debugfs file boilerplate generator.
 *
 * Expands to binder_<name>_open(), a single_open() wrapper around a
 * caller-provided binder_<name>_show(), plus the matching read-only
 * seq_file file_operations table binder_<name>_fops.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090077
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

/* vma flags that make a binder mapping invalid (mapping must be read-only). */
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

/* 64 pages; size threshold used by the buffer allocator. */
#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
90
/* Bit flags selecting which binder_debug() categories are printed. */
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
/* Default: only log user errors and failed/dead transactions.
 * Writable at runtime via /sys/module/.../parameters/debug_mask.
 */
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* When set, debugfs dumps skip taking binder_main_lock (racy, debug aid). */
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
115
/* Threads park here while stop_on_user_error == 2 (see binder_user_error). */
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

/*
 * Setter for the stop_on_user_error module parameter.  After updating
 * the value, wake any waiters if the driver is no longer stopped (< 2).
 * NOTE(review): the param_set_int() result is not checked before the
 * wakeup decision; on parse failure the old value is re-evaluated.
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
131
/* Print only when the given category bit is enabled in binder_debug_mask. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/*
 * Report a misbehaving user process; if stop_on_user_error was armed
 * (== 1) escalate it to 2 so binder threads block for a debugger.
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Recover the containing object from an embedded binder_object_header. */
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
150
/* Object classes tracked in binder_stats.obj_created/obj_deleted. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	/* per-command counters, indexed by the ioctl command number */
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	/* lifetime object counters, indexed by enum binder_stat_types */
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

/* Global (driver-wide) statistics; per-proc stats live in binder_proc. */
static struct binder_stats binder_stats;

/* Record destruction of one object of class @type. */
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

/* Record creation of one object of class @type. */
static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}
180
/* One record in the fixed-size transaction debug log (debugfs). */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
/* 32-entry ring buffer; 'full' is set once it has wrapped at least once. */
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
200
201static struct binder_transaction_log_entry *binder_transaction_log_add(
202 struct binder_transaction_log *log)
203{
204 struct binder_transaction_log_entry *e;
Seunghun Lee10f62862014-05-01 01:30:23 +0900205
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900206 e = &log->entry[log->next];
207 memset(e, 0, sizeof(*e));
208 log->next++;
209 if (log->next == ARRAY_SIZE(log->entry)) {
210 log->next = 0;
211 log->full = 1;
212 }
213 return e;
214}
215
/* Per-context state: the context manager node and the uid allowed to set it. */
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
};

/* The single driver-wide context; mgr uid starts out unset. */
static struct binder_context global_context = {
	.binder_context_mgr_uid = INVALID_UID,
};
224
/* A queued work item; lives on a proc/thread todo or async_todo list. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
236
/* Kernel-side identity of a binder object hosted by one process. */
struct binder_node {
	int debug_id;			/* from binder_last_id */
	struct binder_work work;
	union {
		struct rb_node rb_node;		/* while proc is alive */
		struct hlist_node dead_node;	/* on binder_dead_nodes */
	};
	struct binder_proc *proc;	/* owning process */
	struct hlist_head refs;		/* binder_ref entries pointing here */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;		/* userspace object address (rb key) */
	binder_uintptr_t cookie;	/* opaque userspace companion value */
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;	/* queued async transactions */
};
260
/* Pending death notification; cookie is the userspace value to deliver. */
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};
265
/* A process-local handle (desc) to a binder_node in another process. */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;			/* userspace handle value */
	int strong;
	int weak;
	struct binder_ref_death *death;	/* non-NULL if death notice requested */
};
282
283struct binder_buffer {
Justin P. Mattock217218f2012-01-12 06:51:31 -0800284 struct list_head entry; /* free and allocated entries by address */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900285 struct rb_node rb_node; /* free entry by size or allocated entry */
286 /* by address */
287 unsigned free:1;
288 unsigned allow_user_free:1;
289 unsigned async_transaction:1;
290 unsigned debug_id:29;
291
292 struct binder_transaction *transaction;
293
294 struct binder_node *target_node;
295 size_t data_size;
296 size_t offsets_size;
297 uint8_t data[0];
298};
299
/* Bits of binder_proc.deferred_work consumed by binder_defer_work(). */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
305
/* Per-open-file (per-process) driver state. */
struct binder_proc {
	struct hlist_node proc_node;	/* entry in binder_procs */
	struct rb_root threads;		/* binder_thread by pid */
	struct rb_root nodes;		/* binder_node by ptr */
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;	/* the binder mmap, if any */
	struct mm_struct *vma_vm_mm;	/* mm that owns vma (sanity checked) */
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;		/* enum binder_deferred_state bits */
	void *buffer;			/* kernel address of mapped area */
	ptrdiff_t user_buffer_offset;	/* user addr - kernel addr */

	struct list_head buffers;	/* all buffers, address order */
	struct rb_root free_buffers;	/* free, keyed by size */
	struct rb_root allocated_buffers;	/* in use, keyed by address */
	size_t free_async_space;	/* budget for async transactions */

	struct page **pages;		/* backing pages, one per PAGE_SIZE */
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;		/* work any thread of proc may take */
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_context *context;
};
342
/* binder_thread.looper state bits. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
351
/* Per-userspace-thread state, keyed by pid in proc->threads. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;		/* BINDER_LOOPER_STATE_* bits */
	struct binder_transaction *transaction_stack;
	struct list_head todo;	/* work targeted at this thread only */
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};
366
/* An in-flight transaction or reply between two processes. */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;		/* sender; NULL once it dies */
	struct binder_transaction *from_parent;	/* sender's stacked txn */
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;	/* receiver's stacked txn */
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;	/* payload in receiver's mmap */
	unsigned int code;
	unsigned int flags;
	long priority;			/* sender priority to propagate */
	long saved_priority;		/* receiver's priority to restore */
	kuid_t sender_euid;
};
385
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

/*
 * Reserve an unused fd in the TARGET process's file table (not
 * current's), honouring that task's RLIMIT_NOFILE.
 * Returns the fd, -ESRCH if the proc has no files, or -EMFILE if its
 * sighand can't be locked to read the rlimit.
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
406
/*
 * copied from fd_install
 *
 * Bind @file to the already-reserved @fd in the target proc's file
 * table.  Silently a no-op if the proc's files are already gone.
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
416
/*
 * copied from sys_close
 *
 * Close @fd in the target proc's file table.  -ERESTART* codes are
 * collapsed to -EINTR because the table entry is already cleared and
 * the close cannot be restarted.
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
437
/* Take the global driver lock; @tag labels the tracepoints for profiling. */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

/* Release the global driver lock taken by binder_lock(). */
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
450
/*
 * Set current's nice value to @nice, or, if that exceeds what
 * RLIMIT_NICE allows, to the closest permitted value (and complain).
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	/* cap at the most-favourable nice the rlimit permits */
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	/* rlimit allowed nothing below the default range at all */
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
468
/*
 * Usable payload size of @buffer: the gap between its data[] and the
 * start of the next buffer header (or the end of the mapped area for
 * the last buffer).  Buffers are laid out back to back in proc->buffers.
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}
477
/*
 * Insert a free buffer into proc->free_buffers, an rbtree keyed by
 * usable size so the allocator can do best-fit lookups.
 */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		      proc->pid, new_buffer_size, new_buffer);

	/* standard rbtree descent; duplicates of equal size go right */
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
510
/*
 * Insert an in-use buffer into proc->allocated_buffers, an rbtree
 * keyed by kernel address (addresses are unique, so equality is a bug).
 */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
535
/*
 * Translate a userspace data pointer back to its binder_buffer header
 * and confirm it is a live allocation: subtract the user/kernel offset
 * and the data[] offset, then look the result up in allocated_buffers.
 * Returns NULL if the pointer doesn't match a current allocation.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
559
/*
 * Allocate (@allocate != 0) or free backing pages for the kernel
 * address range [@start, @end) of @proc's buffer area.  When
 * allocating, each page is mapped into both the kernel vmalloc space
 * and the process's binder vma.  @vma may be passed by binder_mmap();
 * otherwise it is re-fetched from proc under mm->mmap_sem.
 * Returns 0 on success, -ENOMEM on failure (partial work undone via
 * the shared free/error path below).
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	/* caller-supplied vma means the mmap_sem is already held */
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		/* refuse a vma whose mm is not the task's current mm */
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/*
	 * Walk backwards tearing pages down.  The error labels jump INTO
	 * this loop body so a partially-set-up page is unwound exactly as
	 * far as it got, then the loop continues with earlier pages.
	 */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
661
/*
 * Best-fit allocate a transaction buffer from @proc's mapped area.
 * @data_size/@offsets_size are each pointer-aligned and summed; async
 * allocations are additionally charged against free_async_space.
 * The chosen free buffer is split if the remainder can hold another
 * header (+4 bytes); backing pages are faulted in on demand.
 * Returns NULL on any failure (no vma, overflow, no space, no pages).
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* detect unsigned overflow of the aligned sum */
	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	/* best-fit search; n == NULL afterwards means inexact fit */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	/* fault in the pages the payload will occupy */
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		/* split: remainder becomes a new free buffer after this one */
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
767
768static void *buffer_start_page(struct binder_buffer *buffer)
769{
770 return (void *)((uintptr_t)buffer & PAGE_MASK);
771}
772
773static void *buffer_end_page(struct binder_buffer *buffer)
774{
775 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
776}
777
778static void binder_delete_free_buffer(struct binder_proc *proc,
779 struct binder_buffer *buffer)
780{
781 struct binder_buffer *prev, *next = NULL;
782 int free_page_end = 1;
783 int free_page_start = 1;
784
785 BUG_ON(proc->buffers.next == &buffer->entry);
786 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
787 BUG_ON(!prev->free);
788 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
789 free_page_start = 0;
790 if (buffer_end_page(prev) == buffer_end_page(buffer))
791 free_page_end = 0;
792 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Anmol Sarma56b468f2012-10-30 22:35:43 +0530793 "%d: merge free, buffer %p share page with %p\n",
794 proc->pid, buffer, prev);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900795 }
796
797 if (!list_is_last(&buffer->entry, &proc->buffers)) {
798 next = list_entry(buffer->entry.next,
799 struct binder_buffer, entry);
800 if (buffer_start_page(next) == buffer_end_page(buffer)) {
801 free_page_end = 0;
802 if (buffer_start_page(next) ==
803 buffer_start_page(buffer))
804 free_page_start = 0;
805 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Anmol Sarma56b468f2012-10-30 22:35:43 +0530806 "%d: merge free, buffer %p share page with %p\n",
807 proc->pid, buffer, prev);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900808 }
809 }
810 list_del(&buffer->entry);
811 if (free_page_start || free_page_end) {
812 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Masanari Iida1dcdbfd2013-06-23 23:47:15 +0900813 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900814 proc->pid, buffer, free_page_start ? "" : " end",
815 free_page_end ? "" : " start", prev, next);
816 binder_update_page_range(proc, 0, free_page_start ?
817 buffer_start_page(buffer) : buffer_end_page(buffer),
818 (free_page_end ? buffer_end_page(buffer) :
819 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
820 }
821}
822
/*
 * Return @buffer to the free pool: refund async space if applicable,
 * release its backing pages, move it from allocated_buffers to
 * free_buffers, and coalesce with adjacent free buffers on both sides.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	/* drop the fully-covered pages of the payload region */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* coalesce with a free successor: absorb next into this buffer */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* coalesce with a free predecessor: absorb this buffer into prev */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
878
879static struct binder_node *binder_get_node(struct binder_proc *proc,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800880 binder_uintptr_t ptr)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900881{
882 struct rb_node *n = proc->nodes.rb_node;
883 struct binder_node *node;
884
885 while (n) {
886 node = rb_entry(n, struct binder_node, rb_node);
887
888 if (ptr < node->ptr)
889 n = n->rb_left;
890 else if (ptr > node->ptr)
891 n = n->rb_right;
892 else
893 return node;
894 }
895 return NULL;
896}
897
898static struct binder_node *binder_new_node(struct binder_proc *proc,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800899 binder_uintptr_t ptr,
900 binder_uintptr_t cookie)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900901{
902 struct rb_node **p = &proc->nodes.rb_node;
903 struct rb_node *parent = NULL;
904 struct binder_node *node;
905
906 while (*p) {
907 parent = *p;
908 node = rb_entry(parent, struct binder_node, rb_node);
909
910 if (ptr < node->ptr)
911 p = &(*p)->rb_left;
912 else if (ptr > node->ptr)
913 p = &(*p)->rb_right;
914 else
915 return NULL;
916 }
917
918 node = kzalloc(sizeof(*node), GFP_KERNEL);
919 if (node == NULL)
920 return NULL;
921 binder_stats_created(BINDER_STAT_NODE);
922 rb_link_node(&node->rb_node, parent, p);
923 rb_insert_color(&node->rb_node, &proc->nodes);
924 node->debug_id = ++binder_last_id;
925 node->proc = proc;
926 node->ptr = ptr;
927 node->cookie = cookie;
928 node->work.type = BINDER_WORK_NODE;
929 INIT_LIST_HEAD(&node->work.entry);
930 INIT_LIST_HEAD(&node->async_todo);
931 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800932 "%d:%d node %d u%016llx c%016llx created\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900933 proc->pid, current->pid, node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800934 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900935 return node;
936}
937
/*
 * binder_inc_node() - increment a reference count on @node.
 * @node:	node to take the reference on
 * @strong:	non-zero for a strong reference, zero for a weak one
 * @internal:	non-zero when the reference is held on behalf of a
 *		binder_ref (remote process), zero for a local reference
 * @target_list: work list on which to queue the node so the owning
 *		process is told about the reference change; may only be
 *		NULL in the cases the checks below permit
 *
 * Return: 0 on success, -EINVAL when a first reference is taken without
 * a target list to notify the owner on.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * The first internal strong ref needs a target_list so
			 * the owner can acknowledge it; the sole exception is
			 * the context manager node while the owner already
			 * holds a strong ref (has_strong_ref set).
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		/* Owner has not acknowledged a strong ref yet: (re)queue work */
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		/* Queue work only if none is pending (entry not on a list) */
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
973
/*
 * binder_dec_node() - drop a reference on @node, freeing it when unused.
 * @node:	node to release a reference on
 * @strong:	non-zero to drop a strong reference, zero for a weak one
 * @internal:	non-zero when the reference was held by a binder_ref
 *
 * When the last reference of the given strength is dropped and the
 * owning process still believes it holds a ref (has_strong_ref /
 * has_weak_ref), the node's work item is queued on the owner's todo
 * list so the owner can be told to release it.  A node with no
 * references left at all is unlinked (from proc->nodes or from the
 * dead-nodes hlist) and freed.
 *
 * Return: always 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		/* Still strongly referenced: nothing more to do */
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		/* Still weakly referenced (locally or by any binder_ref) */
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* Owner must be told the ref went away; queue once only */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		/* Free the node once truly unreferenced */
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* proc already gone: node was on dead list */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
1016
1017
1018static struct binder_ref *binder_get_ref(struct binder_proc *proc,
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001019 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001020{
1021 struct rb_node *n = proc->refs_by_desc.rb_node;
1022 struct binder_ref *ref;
1023
1024 while (n) {
1025 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1026
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001027 if (desc < ref->desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001028 n = n->rb_left;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001029 } else if (desc > ref->desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001030 n = n->rb_right;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001031 } else if (need_strong_ref && !ref->strong) {
1032 binder_user_error("tried to use weak ref as strong ref\n");
1033 return NULL;
1034 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001035 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001036 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001037 }
1038 return NULL;
1039}
1040
/*
 * binder_get_ref_for_node() - find or create @proc's ref to @node.
 * @proc:	process that (will) hold the reference
 * @node:	node being referenced (may be NULL for a dead node)
 *
 * Refs are indexed twice: by node (refs_by_node) for this lookup, and
 * by descriptor (refs_by_desc) for user-space handle lookups.  A newly
 * created ref gets the smallest unused descriptor; descriptor 0 is
 * reserved for the context manager node, every other ref starts at 1.
 *
 * Return: the existing or newly created ref, or NULL on allocation
 * failure.  New refs start with zero strong/weak counts (kzalloc).
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	/* Fast path: an existing ref to @node is simply returned */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/*
	 * Pick the lowest free descriptor: walk refs_by_desc in ascending
	 * order and stop at the first gap.  Only the context manager node
	 * may take descriptor 0.
	 */
	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* Insert into the descriptor-keyed tree; desc is unique by now */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		/* Let the node know about this ref (for death/teardown) */
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
1107
/*
 * binder_delete_ref() - destroy @ref and release what it holds.
 * @ref:	reference to tear down
 *
 * Unlinks the ref from both of its process' rb-trees and from the
 * node's ref list, drops the node references it held (the strong one
 * first, if any, then the implicit weak one), cancels any pending
 * death notification, and frees the ref.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	/* Drop off the node's ref list before the final weak dec, which
	 * may free the node once hlist_empty(&node->refs) holds.
	 */
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
1132
1133static int binder_inc_ref(struct binder_ref *ref, int strong,
1134 struct list_head *target_list)
1135{
1136 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001137
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001138 if (strong) {
1139 if (ref->strong == 0) {
1140 ret = binder_inc_node(ref->node, 1, 1, target_list);
1141 if (ret)
1142 return ret;
1143 }
1144 ref->strong++;
1145 } else {
1146 if (ref->weak == 0) {
1147 ret = binder_inc_node(ref->node, 0, 1, target_list);
1148 if (ret)
1149 return ret;
1150 }
1151 ref->weak++;
1152 }
1153 return 0;
1154}
1155
1156
1157static int binder_dec_ref(struct binder_ref *ref, int strong)
1158{
1159 if (strong) {
1160 if (ref->strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301161 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001162 ref->proc->pid, ref->debug_id,
1163 ref->desc, ref->strong, ref->weak);
1164 return -EINVAL;
1165 }
1166 ref->strong--;
1167 if (ref->strong == 0) {
1168 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001169
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001170 ret = binder_dec_node(ref->node, strong, 1);
1171 if (ret)
1172 return ret;
1173 }
1174 } else {
1175 if (ref->weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301176 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001177 ref->proc->pid, ref->debug_id,
1178 ref->desc, ref->strong, ref->weak);
1179 return -EINVAL;
1180 }
1181 ref->weak--;
1182 }
1183 if (ref->strong == 0 && ref->weak == 0)
1184 binder_delete_ref(ref);
1185 return 0;
1186}
1187
1188static void binder_pop_transaction(struct binder_thread *target_thread,
1189 struct binder_transaction *t)
1190{
1191 if (target_thread) {
1192 BUG_ON(target_thread->transaction_stack != t);
1193 BUG_ON(target_thread->transaction_stack->from != target_thread);
1194 target_thread->transaction_stack =
1195 target_thread->transaction_stack->from_parent;
1196 t->from = NULL;
1197 }
1198 t->need_reply = 0;
1199 if (t->buffer)
1200 t->buffer->transaction = NULL;
1201 kfree(t);
1202 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1203}
1204
/*
 * binder_send_failed_reply() - deliver @error_code for a failed @t.
 * @t:		transaction (never one-way) whose reply cannot be sent
 * @error_code:	BR_* error to report to the original caller
 *
 * Walks back along the transaction stack towards the original caller.
 * If the calling thread still exists, its return_error is set (saving
 * any prior pending error in return_error2) and it is woken; otherwise
 * the transaction is popped and the walk retries with its parent until
 * a live thread or the stack root is reached.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/* Preserve an already-pending error in slot 2 */
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* Both error slots occupied: drop this one */
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		/* Caller thread is gone: save the parent before t is freed */
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
1257
Martijn Coenenfeba3902017-02-03 14:40:45 -08001258/**
1259 * binder_validate_object() - checks for a valid metadata object in a buffer.
1260 * @buffer: binder_buffer that we're parsing.
1261 * @offset: offset in the buffer at which to validate an object.
1262 *
1263 * Return: If there's a valid metadata object at @offset in @buffer, the
1264 * size of that object. Otherwise, it returns zero.
1265 */
1266static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1267{
1268 /* Check if we can read a header first */
1269 struct binder_object_header *hdr;
1270 size_t object_size = 0;
1271
1272 if (offset > buffer->data_size - sizeof(*hdr) ||
1273 buffer->data_size < sizeof(*hdr) ||
1274 !IS_ALIGNED(offset, sizeof(u32)))
1275 return 0;
1276
1277 /* Ok, now see if we can read a complete object. */
1278 hdr = (struct binder_object_header *)(buffer->data + offset);
1279 switch (hdr->type) {
1280 case BINDER_TYPE_BINDER:
1281 case BINDER_TYPE_WEAK_BINDER:
1282 case BINDER_TYPE_HANDLE:
1283 case BINDER_TYPE_WEAK_HANDLE:
1284 object_size = sizeof(struct flat_binder_object);
1285 break;
1286 case BINDER_TYPE_FD:
1287 object_size = sizeof(struct binder_fd_object);
1288 break;
1289 default:
1290 return 0;
1291 }
1292 if (offset <= buffer->data_size - object_size &&
1293 buffer->data_size >= object_size)
1294 return object_size;
1295 else
1296 return 0;
1297}
1298
/*
 * binder_transaction_buffer_release() - release objects in a buffer.
 * @proc:	process that owns @buffer
 * @buffer:	transaction buffer whose embedded objects are released
 * @failed_at:	when non-NULL, the offset entry at which translation
 *		failed; only objects before it are released and the
 *		driver-installed fds are closed.  When NULL the whole
 *		offsets array is processed and fds are left alone
 *		(the receiver owns them by then).
 *
 * Undoes the reference counts taken while translating the buffer's
 * flat_binder_objects: nodes are decremented, handles are dec-ref'd,
 * and the buffer's target node (if any) loses its strong ref.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* Offsets array follows the data area, pointer-aligned */
	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			/* strong dec for BINDER, weak for WEAK_BINDER */
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			/* fds are closed only when unwinding a failed
			 * translation; otherwise the target owns them.
			 */
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;

		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
1384
1385static void binder_transaction(struct binder_proc *proc,
1386 struct binder_thread *thread,
1387 struct binder_transaction_data *tr, int reply)
1388{
1389 struct binder_transaction *t;
1390 struct binder_work *tcomplete;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001391 binder_size_t *offp, *off_end;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001392 binder_size_t off_min;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001393 struct binder_proc *target_proc;
1394 struct binder_thread *target_thread = NULL;
1395 struct binder_node *target_node = NULL;
1396 struct list_head *target_list;
1397 wait_queue_head_t *target_wait;
1398 struct binder_transaction *in_reply_to = NULL;
1399 struct binder_transaction_log_entry *e;
1400 uint32_t return_error;
Martijn Coenen342e5c92017-02-03 14:40:46 -08001401 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001402
1403 e = binder_transaction_log_add(&binder_transaction_log);
1404 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1405 e->from_proc = proc->pid;
1406 e->from_thread = thread->pid;
1407 e->target_handle = tr->target.handle;
1408 e->data_size = tr->data_size;
1409 e->offsets_size = tr->offsets_size;
1410
1411 if (reply) {
1412 in_reply_to = thread->transaction_stack;
1413 if (in_reply_to == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301414 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001415 proc->pid, thread->pid);
1416 return_error = BR_FAILED_REPLY;
1417 goto err_empty_call_stack;
1418 }
1419 binder_set_nice(in_reply_to->saved_priority);
1420 if (in_reply_to->to_thread != thread) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301421 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001422 proc->pid, thread->pid, in_reply_to->debug_id,
1423 in_reply_to->to_proc ?
1424 in_reply_to->to_proc->pid : 0,
1425 in_reply_to->to_thread ?
1426 in_reply_to->to_thread->pid : 0);
1427 return_error = BR_FAILED_REPLY;
1428 in_reply_to = NULL;
1429 goto err_bad_call_stack;
1430 }
1431 thread->transaction_stack = in_reply_to->to_parent;
1432 target_thread = in_reply_to->from;
1433 if (target_thread == NULL) {
1434 return_error = BR_DEAD_REPLY;
1435 goto err_dead_binder;
1436 }
1437 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301438 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001439 proc->pid, thread->pid,
1440 target_thread->transaction_stack ?
1441 target_thread->transaction_stack->debug_id : 0,
1442 in_reply_to->debug_id);
1443 return_error = BR_FAILED_REPLY;
1444 in_reply_to = NULL;
1445 target_thread = NULL;
1446 goto err_dead_binder;
1447 }
1448 target_proc = target_thread->proc;
1449 } else {
1450 if (tr->target.handle) {
1451 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09001452
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001453 ref = binder_get_ref(proc, tr->target.handle, true);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001454 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301455 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001456 proc->pid, thread->pid);
1457 return_error = BR_FAILED_REPLY;
1458 goto err_invalid_target_handle;
1459 }
1460 target_node = ref->node;
1461 } else {
Martijn Coenen342e5c92017-02-03 14:40:46 -08001462 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001463 if (target_node == NULL) {
1464 return_error = BR_DEAD_REPLY;
1465 goto err_no_context_mgr_node;
1466 }
1467 }
1468 e->to_node = target_node->debug_id;
1469 target_proc = target_node->proc;
1470 if (target_proc == NULL) {
1471 return_error = BR_DEAD_REPLY;
1472 goto err_dead_binder;
1473 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001474 if (security_binder_transaction(proc->tsk,
1475 target_proc->tsk) < 0) {
1476 return_error = BR_FAILED_REPLY;
1477 goto err_invalid_target_handle;
1478 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001479 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1480 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09001481
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001482 tmp = thread->transaction_stack;
1483 if (tmp->to_thread != thread) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301484 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001485 proc->pid, thread->pid, tmp->debug_id,
1486 tmp->to_proc ? tmp->to_proc->pid : 0,
1487 tmp->to_thread ?
1488 tmp->to_thread->pid : 0);
1489 return_error = BR_FAILED_REPLY;
1490 goto err_bad_call_stack;
1491 }
1492 while (tmp) {
1493 if (tmp->from && tmp->from->proc == target_proc)
1494 target_thread = tmp->from;
1495 tmp = tmp->from_parent;
1496 }
1497 }
1498 }
1499 if (target_thread) {
1500 e->to_thread = target_thread->pid;
1501 target_list = &target_thread->todo;
1502 target_wait = &target_thread->wait;
1503 } else {
1504 target_list = &target_proc->todo;
1505 target_wait = &target_proc->wait;
1506 }
1507 e->to_proc = target_proc->pid;
1508
1509 /* TODO: reuse incoming transaction for reply */
1510 t = kzalloc(sizeof(*t), GFP_KERNEL);
1511 if (t == NULL) {
1512 return_error = BR_FAILED_REPLY;
1513 goto err_alloc_t_failed;
1514 }
1515 binder_stats_created(BINDER_STAT_TRANSACTION);
1516
1517 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1518 if (tcomplete == NULL) {
1519 return_error = BR_FAILED_REPLY;
1520 goto err_alloc_tcomplete_failed;
1521 }
1522 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1523
1524 t->debug_id = ++binder_last_id;
1525 e->debug_id = t->debug_id;
1526
1527 if (reply)
1528 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001529 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001530 proc->pid, thread->pid, t->debug_id,
1531 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001532 (u64)tr->data.ptr.buffer,
1533 (u64)tr->data.ptr.offsets,
1534 (u64)tr->data_size, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001535 else
1536 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001537 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001538 proc->pid, thread->pid, t->debug_id,
1539 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001540 (u64)tr->data.ptr.buffer,
1541 (u64)tr->data.ptr.offsets,
1542 (u64)tr->data_size, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001543
1544 if (!reply && !(tr->flags & TF_ONE_WAY))
1545 t->from = thread;
1546 else
1547 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03001548 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001549 t->to_proc = target_proc;
1550 t->to_thread = target_thread;
1551 t->code = tr->code;
1552 t->flags = tr->flags;
1553 t->priority = task_nice(current);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001554
1555 trace_binder_transaction(reply, t, target_node);
1556
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001557 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1558 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1559 if (t->buffer == NULL) {
1560 return_error = BR_FAILED_REPLY;
1561 goto err_binder_alloc_buf_failed;
1562 }
1563 t->buffer->allow_user_free = 0;
1564 t->buffer->debug_id = t->debug_id;
1565 t->buffer->transaction = t;
1566 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001567 trace_binder_transaction_alloc_buf(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001568 if (target_node)
1569 binder_inc_node(target_node, 1, 0, NULL);
1570
Arve Hjønnevågda498892014-02-21 14:40:26 -08001571 offp = (binder_size_t *)(t->buffer->data +
1572 ALIGN(tr->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001573
Arve Hjønnevågda498892014-02-21 14:40:26 -08001574 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1575 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301576 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1577 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001578 return_error = BR_FAILED_REPLY;
1579 goto err_copy_data_failed;
1580 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001581 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1582 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301583 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1584 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001585 return_error = BR_FAILED_REPLY;
1586 goto err_copy_data_failed;
1587 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001588 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1589 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1590 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001591 return_error = BR_FAILED_REPLY;
1592 goto err_bad_offset;
1593 }
1594 off_end = (void *)offp + tr->offsets_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001595 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001596 for (; offp < off_end; offp++) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001597 struct binder_object_header *hdr;
1598 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09001599
Martijn Coenenfeba3902017-02-03 14:40:45 -08001600 if (object_size == 0 || *offp < off_min) {
1601 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001602 proc->pid, thread->pid, (u64)*offp,
1603 (u64)off_min,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001604 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001605 return_error = BR_FAILED_REPLY;
1606 goto err_bad_offset;
1607 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001608
1609 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1610 off_min = *offp + object_size;
1611 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001612 case BINDER_TYPE_BINDER:
1613 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001614 struct flat_binder_object *fp;
1615 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001616 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09001617
Martijn Coenenfeba3902017-02-03 14:40:45 -08001618 fp = to_flat_binder_object(hdr);
1619 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001620 if (node == NULL) {
1621 node = binder_new_node(proc, fp->binder, fp->cookie);
1622 if (node == NULL) {
1623 return_error = BR_FAILED_REPLY;
1624 goto err_binder_new_node_failed;
1625 }
1626 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1627 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1628 }
1629 if (fp->cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001630 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001631 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001632 (u64)fp->binder, node->debug_id,
1633 (u64)fp->cookie, (u64)node->cookie);
Christian Engelmayer7d420432014-05-07 21:44:53 +02001634 return_error = BR_FAILED_REPLY;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001635 goto err_binder_get_ref_for_node_failed;
1636 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001637 if (security_binder_transfer_binder(proc->tsk,
1638 target_proc->tsk)) {
1639 return_error = BR_FAILED_REPLY;
1640 goto err_binder_get_ref_for_node_failed;
1641 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001642 ref = binder_get_ref_for_node(target_proc, node);
1643 if (ref == NULL) {
1644 return_error = BR_FAILED_REPLY;
1645 goto err_binder_get_ref_for_node_failed;
1646 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001647 if (hdr->type == BINDER_TYPE_BINDER)
1648 hdr->type = BINDER_TYPE_HANDLE;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001649 else
Martijn Coenenfeba3902017-02-03 14:40:45 -08001650 hdr->type = BINDER_TYPE_WEAK_HANDLE;
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001651 fp->binder = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001652 fp->handle = ref->desc;
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001653 fp->cookie = 0;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001654 binder_inc_ref(ref, hdr->type == BINDER_TYPE_HANDLE,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001655 &thread->todo);
1656
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001657 trace_binder_transaction_node_to_ref(t, node, ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001658 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001659 " node %d u%016llx -> ref %d desc %d\n",
1660 node->debug_id, (u64)node->ptr,
1661 ref->debug_id, ref->desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001662 } break;
1663 case BINDER_TYPE_HANDLE:
1664 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001665 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001666 struct binder_ref *ref;
1667
Martijn Coenenfeba3902017-02-03 14:40:45 -08001668 fp = to_flat_binder_object(hdr);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001669 ref = binder_get_ref(proc, fp->handle,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001670 hdr->type == BINDER_TYPE_HANDLE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001671 if (ref == NULL) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001672 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
Anmol Sarma56b468f2012-10-30 22:35:43 +05301673 proc->pid,
1674 thread->pid, fp->handle);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001675 return_error = BR_FAILED_REPLY;
1676 goto err_binder_get_ref_failed;
1677 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001678 if (security_binder_transfer_binder(proc->tsk,
1679 target_proc->tsk)) {
1680 return_error = BR_FAILED_REPLY;
1681 goto err_binder_get_ref_failed;
1682 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001683 if (ref->node->proc == target_proc) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001684 if (hdr->type == BINDER_TYPE_HANDLE)
1685 hdr->type = BINDER_TYPE_BINDER;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001686 else
Martijn Coenenfeba3902017-02-03 14:40:45 -08001687 hdr->type = BINDER_TYPE_WEAK_BINDER;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001688 fp->binder = ref->node->ptr;
1689 fp->cookie = ref->node->cookie;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001690 binder_inc_node(ref->node,
1691 hdr->type == BINDER_TYPE_BINDER,
1692 0, NULL);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001693 trace_binder_transaction_ref_to_node(t, ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001694 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001695 " ref %d desc %d -> node %d u%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001696 ref->debug_id, ref->desc, ref->node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001697 (u64)ref->node->ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001698 } else {
1699 struct binder_ref *new_ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09001700
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001701 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1702 if (new_ref == NULL) {
1703 return_error = BR_FAILED_REPLY;
1704 goto err_binder_get_ref_for_node_failed;
1705 }
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001706 fp->binder = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001707 fp->handle = new_ref->desc;
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001708 fp->cookie = 0;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001709 binder_inc_ref(new_ref,
1710 hdr->type == BINDER_TYPE_HANDLE,
1711 NULL);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001712 trace_binder_transaction_ref_to_ref(t, ref,
1713 new_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001714 binder_debug(BINDER_DEBUG_TRANSACTION,
1715 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1716 ref->debug_id, ref->desc, new_ref->debug_id,
1717 new_ref->desc, ref->node->debug_id);
1718 }
1719 } break;
1720
1721 case BINDER_TYPE_FD: {
1722 int target_fd;
1723 struct file *file;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001724 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001725
1726 if (reply) {
1727 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001728 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001729 proc->pid, thread->pid, fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001730 return_error = BR_FAILED_REPLY;
1731 goto err_fd_not_allowed;
1732 }
1733 } else if (!target_node->accept_fds) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001734 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001735 proc->pid, thread->pid, fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001736 return_error = BR_FAILED_REPLY;
1737 goto err_fd_not_allowed;
1738 }
1739
Martijn Coenenfeba3902017-02-03 14:40:45 -08001740 file = fget(fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001741 if (file == NULL) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001742 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001743 proc->pid, thread->pid, fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001744 return_error = BR_FAILED_REPLY;
1745 goto err_fget_failed;
1746 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001747 if (security_binder_transfer_file(proc->tsk,
1748 target_proc->tsk,
1749 file) < 0) {
1750 fput(file);
1751 return_error = BR_FAILED_REPLY;
1752 goto err_get_unused_fd_failed;
1753 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001754 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1755 if (target_fd < 0) {
1756 fput(file);
1757 return_error = BR_FAILED_REPLY;
1758 goto err_get_unused_fd_failed;
1759 }
1760 task_fd_install(target_proc, target_fd, file);
Martijn Coenenfeba3902017-02-03 14:40:45 -08001761 trace_binder_transaction_fd(t, fp->fd, target_fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001762 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001763 " fd %d -> %d\n", fp->fd,
1764 target_fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001765 /* TODO: fput? */
Martijn Coenenfeba3902017-02-03 14:40:45 -08001766 fp->pad_binder = 0;
1767 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001768 } break;
1769
1770 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001771 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001772 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001773 return_error = BR_FAILED_REPLY;
1774 goto err_bad_object_type;
1775 }
1776 }
1777 if (reply) {
1778 BUG_ON(t->buffer->async_transaction != 0);
1779 binder_pop_transaction(target_thread, in_reply_to);
1780 } else if (!(t->flags & TF_ONE_WAY)) {
1781 BUG_ON(t->buffer->async_transaction != 0);
1782 t->need_reply = 1;
1783 t->from_parent = thread->transaction_stack;
1784 thread->transaction_stack = t;
1785 } else {
1786 BUG_ON(target_node == NULL);
1787 BUG_ON(t->buffer->async_transaction != 1);
1788 if (target_node->has_async_transaction) {
1789 target_list = &target_node->async_todo;
1790 target_wait = NULL;
1791 } else
1792 target_node->has_async_transaction = 1;
1793 }
1794 t->work.type = BINDER_WORK_TRANSACTION;
1795 list_add_tail(&t->work.entry, target_list);
1796 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1797 list_add_tail(&tcomplete->entry, &thread->todo);
1798 if (target_wait)
1799 wake_up_interruptible(target_wait);
1800 return;
1801
1802err_get_unused_fd_failed:
1803err_fget_failed:
1804err_fd_not_allowed:
1805err_binder_get_ref_for_node_failed:
1806err_binder_get_ref_failed:
1807err_binder_new_node_failed:
1808err_bad_object_type:
1809err_bad_offset:
1810err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001811 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001812 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1813 t->buffer->transaction = NULL;
1814 binder_free_buf(target_proc, t->buffer);
1815err_binder_alloc_buf_failed:
1816 kfree(tcomplete);
1817 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1818err_alloc_tcomplete_failed:
1819 kfree(t);
1820 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1821err_alloc_t_failed:
1822err_bad_call_stack:
1823err_empty_call_stack:
1824err_dead_binder:
1825err_invalid_target_handle:
1826err_no_context_mgr_node:
1827 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001828 "%d:%d transaction failed %d, size %lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001829 proc->pid, thread->pid, return_error,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001830 (u64)tr->data_size, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001831
1832 {
1833 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09001834
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001835 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1836 *fe = *e;
1837 }
1838
1839 BUG_ON(thread->return_error != BR_OK);
1840 if (in_reply_to) {
1841 thread->return_error = BR_TRANSACTION_COMPLETE;
1842 binder_send_failed_reply(in_reply_to, return_error);
1843 } else
1844 thread->return_error = return_error;
1845}
1846
/**
 * binder_thread_write() - consume BC_* commands written by userspace
 * @proc:          the binder process the calling thread belongs to
 * @thread:        the calling binder thread
 * @binder_buffer: user-space address of the command buffer
 * @size:          total size of the command buffer
 * @consumed:      in: bytes already consumed; out: bytes consumed after
 *                 this call (updated after every fully-processed command)
 *
 * Parses the user buffer one command at a time and dispatches each one.
 * Stops early when the buffer is exhausted or a pending return error is
 * set on @thread. Returns 0 on success, -EFAULT on a failed user-space
 * copy, or -EINVAL for unsupported/unknown commands.
 *
 * NOTE(review): appears to run under the global binder lock like its
 * peers (binder_thread_read drops/retakes it around sleeps) — confirm
 * against the callers before relying on this.
 */
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;	/* next unread byte */
	void __user *end = buffer + size;

	/*
	 * A pending return_error aborts further command processing so the
	 * error can be delivered to userspace by binder_thread_read().
	 */
	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		/* Per-driver, per-process and per-thread command counters. */
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			/* Adjust weak (INCREFS/DECREFS) or strong
			 * (ACQUIRE/RELEASE) counts on a local ref. */
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			/*
			 * Desc 0 is reserved for the context manager; taking
			 * a reference on it implicitly creates the local ref.
			 */
			if (target == 0 && context->binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				ref = binder_get_ref_for_node(proc,
					       context->binder_context_mgr_node);
				if (ref->desc != target) {
					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
						proc->pid, thread->pid,
						ref->desc);
				}
			} else
				/* need_strong_ref for the strong ops */
				ref = binder_get_ref(proc, target,
						     cmd == BC_ACQUIRE ||
						     cmd == BC_RELEASE);
			if (ref == NULL) {
				binder_user_error("%d:%d refcount change on invalid ref %d\n",
					proc->pid, thread->pid, target);
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string, ref->debug_id,
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			/* Userspace acknowledges a BR_INCREFS/BR_ACQUIRE it
			 * received earlier; clear the matching pending flag
			 * and drop the temporary node reference. */
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			/* The cookie must match what the node was created
			 * with, otherwise userspace is confused/malicious. */
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			/* Release the ref that was held while the BR_* was
			 * outstanding (strong for ACQUIRE_DONE). */
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			/* Legacy command, never implemented in this driver. */
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			/* Userspace is done with a transaction buffer it
			 * received; release it back to the allocator. */
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;	/* shadows the __user buffer above */

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			/* Only buffers handed to userspace may be freed by it. */
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			/* Detach from a still-active transaction, if any. */
			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			/*
			 * Freeing an async buffer un-throttles the node: move
			 * the next queued async transaction onto this thread's
			 * todo list, or clear the in-flight flag if none.
			 */
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			trace_binder_transaction_buffer_release(buffer);
			/* Drop the refs pinned by the objects in the buffer,
			 * then free the buffer itself. */
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}

		case BC_REGISTER_LOOPER:
			/* Thread was spawned in response to a driver request
			 * (BR_SPAWN_LOOPER); must not follow BC_ENTER_LOOPER
			 * and must match an outstanding request. */
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			/* Thread entered the loop on its own initiative. */
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			/* Register/unregister interest in the death of the
			 * node behind a local ref; @cookie identifies the
			 * registration to userspace. */
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/* Only one death notification per ref. */
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					/* Deliver the failure via the thread's
					 * pending-error mechanism. */
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				/* Node owner already dead: queue the DEAD_BINDER
				 * work immediately (to this thread if it is a
				 * looper, else to the process). */
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					/* Not queued yet: queue the clear
					 * confirmation for delivery. */
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					/* Already queued as DEAD_BINDER: mark it
					 * so delivery also clears. */
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			/* Userspace acknowledges a delivered BR_DEAD_BINDER;
			 * find it on delivered_death by cookie. */
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				break;
			}

			list_del_init(&death->work.entry);
			/* If a clear was requested while the dead-binder work
			 * was in flight, deliver the clear confirmation now. */
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		/* Commit progress after each complete command so a later
		 * error does not re-deliver already-processed commands. */
		*consumed = ptr - buffer;
	}
	return 0;
}
2212
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02002213static void binder_stat_br(struct binder_proc *proc,
2214 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002215{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002216 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002217 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2218 binder_stats.br[_IOC_NR(cmd)]++;
2219 proc->stats.br[_IOC_NR(cmd)]++;
2220 thread->stats.br[_IOC_NR(cmd)]++;
2221 }
2222}
2223
2224static int binder_has_proc_work(struct binder_proc *proc,
2225 struct binder_thread *thread)
2226{
2227 return !list_empty(&proc->todo) ||
2228 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2229}
2230
2231static int binder_has_thread_work(struct binder_thread *thread)
2232{
2233 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2234 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2235}
2236
2237static int binder_thread_read(struct binder_proc *proc,
2238 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002239 binder_uintptr_t binder_buffer, size_t size,
2240 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002241{
Arve Hjønnevågda498892014-02-21 14:40:26 -08002242 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002243 void __user *ptr = buffer + *consumed;
2244 void __user *end = buffer + size;
2245
2246 int ret = 0;
2247 int wait_for_proc_work;
2248
2249 if (*consumed == 0) {
2250 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2251 return -EFAULT;
2252 ptr += sizeof(uint32_t);
2253 }
2254
2255retry:
2256 wait_for_proc_work = thread->transaction_stack == NULL &&
2257 list_empty(&thread->todo);
2258
2259 if (thread->return_error != BR_OK && ptr < end) {
2260 if (thread->return_error2 != BR_OK) {
2261 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2262 return -EFAULT;
2263 ptr += sizeof(uint32_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002264 binder_stat_br(proc, thread, thread->return_error2);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002265 if (ptr == end)
2266 goto done;
2267 thread->return_error2 = BR_OK;
2268 }
2269 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2270 return -EFAULT;
2271 ptr += sizeof(uint32_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002272 binder_stat_br(proc, thread, thread->return_error);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002273 thread->return_error = BR_OK;
2274 goto done;
2275 }
2276
2277
2278 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2279 if (wait_for_proc_work)
2280 proc->ready_threads++;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002281
2282 binder_unlock(__func__);
2283
2284 trace_binder_wait_for_work(wait_for_proc_work,
2285 !!thread->transaction_stack,
2286 !list_empty(&thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002287 if (wait_for_proc_work) {
2288 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2289 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302290 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002291 proc->pid, thread->pid, thread->looper);
2292 wait_event_interruptible(binder_user_error_wait,
2293 binder_stop_on_user_error < 2);
2294 }
2295 binder_set_nice(proc->default_priority);
2296 if (non_block) {
2297 if (!binder_has_proc_work(proc, thread))
2298 ret = -EAGAIN;
2299 } else
Colin Crosse2610b22013-05-06 23:50:15 +00002300 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002301 } else {
2302 if (non_block) {
2303 if (!binder_has_thread_work(thread))
2304 ret = -EAGAIN;
2305 } else
Colin Crosse2610b22013-05-06 23:50:15 +00002306 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002307 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002308
2309 binder_lock(__func__);
2310
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002311 if (wait_for_proc_work)
2312 proc->ready_threads--;
2313 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2314
2315 if (ret)
2316 return ret;
2317
2318 while (1) {
2319 uint32_t cmd;
2320 struct binder_transaction_data tr;
2321 struct binder_work *w;
2322 struct binder_transaction *t = NULL;
2323
Dmitry Voytik395262a2014-09-08 18:16:34 +04002324 if (!list_empty(&thread->todo)) {
2325 w = list_first_entry(&thread->todo, struct binder_work,
2326 entry);
2327 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2328 w = list_first_entry(&proc->todo, struct binder_work,
2329 entry);
2330 } else {
2331 /* no data added */
2332 if (ptr - buffer == 4 &&
2333 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002334 goto retry;
2335 break;
2336 }
2337
2338 if (end - ptr < sizeof(tr) + 4)
2339 break;
2340
2341 switch (w->type) {
2342 case BINDER_WORK_TRANSACTION: {
2343 t = container_of(w, struct binder_transaction, work);
2344 } break;
2345 case BINDER_WORK_TRANSACTION_COMPLETE: {
2346 cmd = BR_TRANSACTION_COMPLETE;
2347 if (put_user(cmd, (uint32_t __user *)ptr))
2348 return -EFAULT;
2349 ptr += sizeof(uint32_t);
2350
2351 binder_stat_br(proc, thread, cmd);
2352 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302353 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002354 proc->pid, thread->pid);
2355
2356 list_del(&w->entry);
2357 kfree(w);
2358 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2359 } break;
2360 case BINDER_WORK_NODE: {
2361 struct binder_node *node = container_of(w, struct binder_node, work);
2362 uint32_t cmd = BR_NOOP;
2363 const char *cmd_name;
2364 int strong = node->internal_strong_refs || node->local_strong_refs;
2365 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
Seunghun Lee10f62862014-05-01 01:30:23 +09002366
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002367 if (weak && !node->has_weak_ref) {
2368 cmd = BR_INCREFS;
2369 cmd_name = "BR_INCREFS";
2370 node->has_weak_ref = 1;
2371 node->pending_weak_ref = 1;
2372 node->local_weak_refs++;
2373 } else if (strong && !node->has_strong_ref) {
2374 cmd = BR_ACQUIRE;
2375 cmd_name = "BR_ACQUIRE";
2376 node->has_strong_ref = 1;
2377 node->pending_strong_ref = 1;
2378 node->local_strong_refs++;
2379 } else if (!strong && node->has_strong_ref) {
2380 cmd = BR_RELEASE;
2381 cmd_name = "BR_RELEASE";
2382 node->has_strong_ref = 0;
2383 } else if (!weak && node->has_weak_ref) {
2384 cmd = BR_DECREFS;
2385 cmd_name = "BR_DECREFS";
2386 node->has_weak_ref = 0;
2387 }
2388 if (cmd != BR_NOOP) {
2389 if (put_user(cmd, (uint32_t __user *)ptr))
2390 return -EFAULT;
2391 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002392 if (put_user(node->ptr,
2393 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002394 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002395 ptr += sizeof(binder_uintptr_t);
2396 if (put_user(node->cookie,
2397 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002398 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002399 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002400
2401 binder_stat_br(proc, thread, cmd);
2402 binder_debug(BINDER_DEBUG_USER_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002403 "%d:%d %s %d u%016llx c%016llx\n",
2404 proc->pid, thread->pid, cmd_name,
2405 node->debug_id,
2406 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002407 } else {
2408 list_del_init(&w->entry);
2409 if (!weak && !strong) {
2410 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002411 "%d:%d node %d u%016llx c%016llx deleted\n",
2412 proc->pid, thread->pid,
2413 node->debug_id,
2414 (u64)node->ptr,
2415 (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002416 rb_erase(&node->rb_node, &proc->nodes);
2417 kfree(node);
2418 binder_stats_deleted(BINDER_STAT_NODE);
2419 } else {
2420 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002421 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2422 proc->pid, thread->pid,
2423 node->debug_id,
2424 (u64)node->ptr,
2425 (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002426 }
2427 }
2428 } break;
2429 case BINDER_WORK_DEAD_BINDER:
2430 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2431 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2432 struct binder_ref_death *death;
2433 uint32_t cmd;
2434
2435 death = container_of(w, struct binder_ref_death, work);
2436 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2437 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2438 else
2439 cmd = BR_DEAD_BINDER;
2440 if (put_user(cmd, (uint32_t __user *)ptr))
2441 return -EFAULT;
2442 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002443 if (put_user(death->cookie,
2444 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002445 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002446 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002447 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002448 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002449 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002450 proc->pid, thread->pid,
2451 cmd == BR_DEAD_BINDER ?
2452 "BR_DEAD_BINDER" :
2453 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002454 (u64)death->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002455
2456 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2457 list_del(&w->entry);
2458 kfree(death);
2459 binder_stats_deleted(BINDER_STAT_DEATH);
2460 } else
2461 list_move(&w->entry, &proc->delivered_death);
2462 if (cmd == BR_DEAD_BINDER)
2463 goto done; /* DEAD_BINDER notifications can cause transactions */
2464 } break;
2465 }
2466
2467 if (!t)
2468 continue;
2469
2470 BUG_ON(t->buffer == NULL);
2471 if (t->buffer->target_node) {
2472 struct binder_node *target_node = t->buffer->target_node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002473
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002474 tr.target.ptr = target_node->ptr;
2475 tr.cookie = target_node->cookie;
2476 t->saved_priority = task_nice(current);
2477 if (t->priority < target_node->min_priority &&
2478 !(t->flags & TF_ONE_WAY))
2479 binder_set_nice(t->priority);
2480 else if (!(t->flags & TF_ONE_WAY) ||
2481 t->saved_priority > target_node->min_priority)
2482 binder_set_nice(target_node->min_priority);
2483 cmd = BR_TRANSACTION;
2484 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002485 tr.target.ptr = 0;
2486 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002487 cmd = BR_REPLY;
2488 }
2489 tr.code = t->code;
2490 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06002491 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002492
2493 if (t->from) {
2494 struct task_struct *sender = t->from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09002495
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002496 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08002497 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002498 } else {
2499 tr.sender_pid = 0;
2500 }
2501
2502 tr.data_size = t->buffer->data_size;
2503 tr.offsets_size = t->buffer->offsets_size;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002504 tr.data.ptr.buffer = (binder_uintptr_t)(
2505 (uintptr_t)t->buffer->data +
2506 proc->user_buffer_offset);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002507 tr.data.ptr.offsets = tr.data.ptr.buffer +
2508 ALIGN(t->buffer->data_size,
2509 sizeof(void *));
2510
2511 if (put_user(cmd, (uint32_t __user *)ptr))
2512 return -EFAULT;
2513 ptr += sizeof(uint32_t);
2514 if (copy_to_user(ptr, &tr, sizeof(tr)))
2515 return -EFAULT;
2516 ptr += sizeof(tr);
2517
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002518 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002519 binder_stat_br(proc, thread, cmd);
2520 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002521 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002522 proc->pid, thread->pid,
2523 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2524 "BR_REPLY",
2525 t->debug_id, t->from ? t->from->proc->pid : 0,
2526 t->from ? t->from->pid : 0, cmd,
2527 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002528 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002529
2530 list_del(&t->work.entry);
2531 t->buffer->allow_user_free = 1;
2532 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2533 t->to_parent = thread->transaction_stack;
2534 t->to_thread = thread;
2535 thread->transaction_stack = t;
2536 } else {
2537 t->buffer->transaction = NULL;
2538 kfree(t);
2539 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2540 }
2541 break;
2542 }
2543
2544done:
2545
2546 *consumed = ptr - buffer;
2547 if (proc->requested_threads + proc->ready_threads == 0 &&
2548 proc->requested_threads_started < proc->max_threads &&
2549 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2550 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
2551 /*spawn a new thread if we leave this out */) {
2552 proc->requested_threads++;
2553 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302554 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002555 proc->pid, thread->pid);
2556 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2557 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002558 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002559 }
2560 return 0;
2561}
2562
/*
 * binder_release_work() - drain and free a pending-work list.
 * @list: a binder work list (a thread's or proc's todo list, or a
 *        node's async_todo) whose items will never be delivered.
 *
 * Called when the owner of the list is going away. Each undelivered
 * item is removed and released according to its type; transactions
 * that a remote caller is still blocked on get a BR_DEAD_REPLY so the
 * caller is unblocked rather than leaked.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				/* Synchronous request: the sender is still
				 * waiting, so fail the reply instead of
				 * silently dropping the transaction. */
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				/* One-way (or reply) work: nobody waits on
				 * it, just free the transaction. */
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			/* The work item itself is the allocation here. */
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			/* BINDER_WORK_NODE and BINDER_WORK_DEAD_BINDER are
			 * embedded in longer-lived objects and must not be
			 * freed here; seeing them (or anything else) on a
			 * list being drained indicates a logic error. */
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}
2612
2613static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2614{
2615 struct binder_thread *thread = NULL;
2616 struct rb_node *parent = NULL;
2617 struct rb_node **p = &proc->threads.rb_node;
2618
2619 while (*p) {
2620 parent = *p;
2621 thread = rb_entry(parent, struct binder_thread, rb_node);
2622
2623 if (current->pid < thread->pid)
2624 p = &(*p)->rb_left;
2625 else if (current->pid > thread->pid)
2626 p = &(*p)->rb_right;
2627 else
2628 break;
2629 }
2630 if (*p == NULL) {
2631 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2632 if (thread == NULL)
2633 return NULL;
2634 binder_stats_created(BINDER_STAT_THREAD);
2635 thread->proc = proc;
2636 thread->pid = current->pid;
2637 init_waitqueue_head(&thread->wait);
2638 INIT_LIST_HEAD(&thread->todo);
2639 rb_link_node(&thread->rb_node, parent, p);
2640 rb_insert_color(&thread->rb_node, &proc->threads);
2641 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2642 thread->return_error = BR_OK;
2643 thread->return_error2 = BR_OK;
2644 }
2645 return thread;
2646}
2647
/*
 * binder_free_thread() - tear down a binder_thread and detach its
 * transactions.
 * @proc:   the owning process.
 * @thread: the thread being destroyed (BINDER_THREAD_EXIT or process
 *          release).
 *
 * Walks the thread's transaction stack, severing whichever side of
 * each transaction pointed at this thread; transactions themselves
 * stay alive for the surviving peer. If the top of the stack was a
 * request targeted at this thread, the waiting sender gets a
 * BR_DEAD_REPLY. Any still-queued work is drained before the thread
 * struct is freed.
 *
 * Return: the number of transactions that were still active.
 * Must be called with the global binder lock held.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* Only the stack top can be an in-flight request aimed at us;
	 * remember it so the sender can be failed after the walk. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming: drop our side and orphan the buffer. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing: the target no longer has a sender. */
			t->from = NULL;
			t = t->from_parent;
		} else
			/* A transaction on our stack must reference us on
			 * one side or the other. */
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
2688
2689static unsigned int binder_poll(struct file *filp,
2690 struct poll_table_struct *wait)
2691{
2692 struct binder_proc *proc = filp->private_data;
2693 struct binder_thread *thread = NULL;
2694 int wait_for_proc_work;
2695
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002696 binder_lock(__func__);
2697
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002698 thread = binder_get_thread(proc);
2699
2700 wait_for_proc_work = thread->transaction_stack == NULL &&
2701 list_empty(&thread->todo) && thread->return_error == BR_OK;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002702
2703 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002704
2705 if (wait_for_proc_work) {
2706 if (binder_has_proc_work(proc, thread))
2707 return POLLIN;
2708 poll_wait(filp, &proc->wait, wait);
2709 if (binder_has_proc_work(proc, thread))
2710 return POLLIN;
2711 } else {
2712 if (binder_has_thread_work(thread))
2713 return POLLIN;
2714 poll_wait(filp, &thread->wait, wait);
2715 if (binder_has_thread_work(thread))
2716 return POLLIN;
2717 }
2718 return 0;
2719}
2720
Tair Rzayev78260ac2014-06-03 22:27:21 +03002721static int binder_ioctl_write_read(struct file *filp,
2722 unsigned int cmd, unsigned long arg,
2723 struct binder_thread *thread)
2724{
2725 int ret = 0;
2726 struct binder_proc *proc = filp->private_data;
2727 unsigned int size = _IOC_SIZE(cmd);
2728 void __user *ubuf = (void __user *)arg;
2729 struct binder_write_read bwr;
2730
2731 if (size != sizeof(struct binder_write_read)) {
2732 ret = -EINVAL;
2733 goto out;
2734 }
2735 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2736 ret = -EFAULT;
2737 goto out;
2738 }
2739 binder_debug(BINDER_DEBUG_READ_WRITE,
2740 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2741 proc->pid, thread->pid,
2742 (u64)bwr.write_size, (u64)bwr.write_buffer,
2743 (u64)bwr.read_size, (u64)bwr.read_buffer);
2744
2745 if (bwr.write_size > 0) {
2746 ret = binder_thread_write(proc, thread,
2747 bwr.write_buffer,
2748 bwr.write_size,
2749 &bwr.write_consumed);
2750 trace_binder_write_done(ret);
2751 if (ret < 0) {
2752 bwr.read_consumed = 0;
2753 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2754 ret = -EFAULT;
2755 goto out;
2756 }
2757 }
2758 if (bwr.read_size > 0) {
2759 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2760 bwr.read_size,
2761 &bwr.read_consumed,
2762 filp->f_flags & O_NONBLOCK);
2763 trace_binder_read_done(ret);
2764 if (!list_empty(&proc->todo))
2765 wake_up_interruptible(&proc->wait);
2766 if (ret < 0) {
2767 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2768 ret = -EFAULT;
2769 goto out;
2770 }
2771 }
2772 binder_debug(BINDER_DEBUG_READ_WRITE,
2773 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2774 proc->pid, thread->pid,
2775 (u64)bwr.write_consumed, (u64)bwr.write_size,
2776 (u64)bwr.read_consumed, (u64)bwr.read_size);
2777 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2778 ret = -EFAULT;
2779 goto out;
2780 }
2781out:
2782 return ret;
2783}
2784
/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR.
 * @filp: the open binder file whose process claims the role.
 *
 * Installs the calling process as the context manager (service
 * manager) for this binder context: a node with ptr/cookie 0 that all
 * other processes reach via handle 0. Only one context manager may
 * exist; if a manager uid was previously recorded, only that same
 * euid may re-claim the role. The security hook is consulted before
 * any state is changed.
 *
 * Return: 0 on success; -EBUSY if already set, -EPERM on uid
 * mismatch or LSM denial, -ENOMEM if node allocation fails.
 * Called with the global binder lock held.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;

	kuid_t curr_euid = current_euid();

	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		/* A manager uid survives from a previous incarnation;
		 * only the same euid may take the role again. */
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	/* The context manager node uses ptr == 0 and cookie == 0. */
	context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (!context->binder_context_mgr_node) {
		ret = -ENOMEM;
		goto out;
	}
	/* Pin the node with strong and weak refs so it cannot go away
	 * while it is the context manager. */
	context->binder_context_mgr_node->local_weak_refs++;
	context->binder_context_mgr_node->local_strong_refs++;
	context->binder_context_mgr_node->has_strong_ref = 1;
	context->binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
2825
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002826static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2827{
2828 int ret;
2829 struct binder_proc *proc = filp->private_data;
2830 struct binder_thread *thread;
2831 unsigned int size = _IOC_SIZE(cmd);
2832 void __user *ubuf = (void __user *)arg;
2833
Tair Rzayev78260ac2014-06-03 22:27:21 +03002834 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2835 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002836
Chen Fenga906d692016-02-01 14:04:02 +08002837 if (unlikely(current->mm != proc->vma_vm_mm)) {
2838 pr_err("current mm mismatch proc mm\n");
2839 return -EINVAL;
2840 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002841 trace_binder_ioctl(cmd, arg);
2842
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002843 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2844 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002845 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002846
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002847 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002848 thread = binder_get_thread(proc);
2849 if (thread == NULL) {
2850 ret = -ENOMEM;
2851 goto err;
2852 }
2853
2854 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03002855 case BINDER_WRITE_READ:
2856 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2857 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002858 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002859 break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002860 case BINDER_SET_MAX_THREADS:
2861 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2862 ret = -EINVAL;
2863 goto err;
2864 }
2865 break;
2866 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03002867 ret = binder_ioctl_set_ctx_mgr(filp);
2868 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002869 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002870 break;
2871 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302872 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002873 proc->pid, thread->pid);
2874 binder_free_thread(proc, thread);
2875 thread = NULL;
2876 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02002877 case BINDER_VERSION: {
2878 struct binder_version __user *ver = ubuf;
2879
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002880 if (size != sizeof(struct binder_version)) {
2881 ret = -EINVAL;
2882 goto err;
2883 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02002884 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2885 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002886 ret = -EINVAL;
2887 goto err;
2888 }
2889 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02002890 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002891 default:
2892 ret = -EINVAL;
2893 goto err;
2894 }
2895 ret = 0;
2896err:
2897 if (thread)
2898 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002899 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002900 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2901 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05302902 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002903err_unlocked:
2904 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002905 return ret;
2906}
2907
2908static void binder_vma_open(struct vm_area_struct *vma)
2909{
2910 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09002911
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002912 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302913 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002914 proc->pid, vma->vm_start, vma->vm_end,
2915 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2916 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002917}
2918
2919static void binder_vma_close(struct vm_area_struct *vma)
2920{
2921 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09002922
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002923 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302924 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002925 proc->pid, vma->vm_start, vma->vm_end,
2926 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2927 (unsigned long)pgprot_val(vma->vm_page_prot));
2928 proc->vma = NULL;
Arve Hjønnevåg2a909572012-03-08 15:43:36 -08002929 proc->vma_vm_mm = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002930 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2931}
2932
Vinayak Menonddac7d52014-06-02 18:17:59 +05302933static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2934{
2935 return VM_FAULT_SIGBUS;
2936}
2937
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07002938static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002939 .open = binder_vma_open,
2940 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05302941 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002942};
2943
/*
 * binder_mmap() - set up the shared transaction buffer for a process.
 * @filp: the open binder file.
 * @vma:  the user mapping being established (capped at 4MB).
 *
 * Allocates a kernel vmalloc area of the same size as the user vma
 * and records the constant offset between the two, so buffers can be
 * addressed from both sides without copying. Only the first page is
 * backed immediately; the rest are populated on demand by the buffer
 * allocator. The whole area initially forms one free buffer, with
 * half the space reserved for async transactions.
 *
 * Return: 0 on success, -EINVAL/-EPERM/-EBUSY/-ENOMEM on failure
 * (see failure_string for the specific cause logged).
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	/* Only the task that opened the device may map it. */
	if (proc->tsk != current)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	/* The buffer must never be writable from user space. */
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* Fixed kernel<->user delta used to translate buffer pointers. */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	/* On VIPT-aliasing caches the two mappings must land in the same
	 * cache colour; nudge vm_start until they do. */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Back just the first page now; the allocator grows on demand. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
3042
/*
 * binder_open() - file_operations open handler for /dev/binder.
 * @nodp: inode of the device node (unused).
 * @filp: file being opened; its private_data will hold the new proc.
 *
 * Allocates and initialises a binder_proc for the opening process,
 * pins the task struct, registers the proc on the global list under
 * the binder lock, and creates a per-proc debugfs entry named after
 * the pid.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a reference on the task for the lifetime of the proc. */
	get_task_struct(current);
	proc->tsk = current;
	/* Remember the opener's mm; binder_ioctl() rejects callers with
	 * a different address space. */
	proc->vma_vm_mm = current->mm;
	proc->context = &global_context;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}
3081
3082static int binder_flush(struct file *filp, fl_owner_t id)
3083{
3084 struct binder_proc *proc = filp->private_data;
3085
3086 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3087
3088 return 0;
3089}
3090
3091static void binder_deferred_flush(struct binder_proc *proc)
3092{
3093 struct rb_node *n;
3094 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09003095
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003096 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3097 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09003098
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003099 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3100 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3101 wake_up_interruptible(&thread->wait);
3102 wake_count++;
3103 }
3104 }
3105 wake_up_interruptible_all(&proc->wait);
3106
3107 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3108 "binder_flush: %d woke %d threads\n", proc->pid,
3109 wake_count);
3110}
3111
/*
 * ->release() handler: runs when the last reference to the binder fd is
 * dropped.  Removes the per-proc debugfs entry immediately, then defers the
 * heavyweight teardown (binder_deferred_release()) to the workqueue.
 */
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
3121
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003122static int binder_node_release(struct binder_node *node, int refs)
3123{
3124 struct binder_ref *ref;
3125 int death = 0;
3126
3127 list_del_init(&node->work.entry);
3128 binder_release_work(&node->async_todo);
3129
3130 if (hlist_empty(&node->refs)) {
3131 kfree(node);
3132 binder_stats_deleted(BINDER_STAT_NODE);
3133
3134 return refs;
3135 }
3136
3137 node->proc = NULL;
3138 node->local_strong_refs = 0;
3139 node->local_weak_refs = 0;
3140 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3141
3142 hlist_for_each_entry(ref, &node->refs, node_entry) {
3143 refs++;
3144
3145 if (!ref->death)
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08003146 continue;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003147
3148 death++;
3149
3150 if (list_empty(&ref->death->work.entry)) {
3151 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3152 list_add_tail(&ref->death->work.entry,
3153 &ref->proc->todo);
3154 wake_up_interruptible(&ref->proc->wait);
3155 } else
3156 BUG();
3157 }
3158
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003159 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3160 "node %d now dead, refs %d, death %d\n",
3161 node->debug_id, refs, death);
3162
3163 return refs;
3164}
3165
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003166static void binder_deferred_release(struct binder_proc *proc)
3167{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003168 struct binder_transaction *t;
Martijn Coenen342e5c92017-02-03 14:40:46 -08003169 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003170 struct rb_node *n;
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003171 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3172 active_transactions, page_count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003173
3174 BUG_ON(proc->vma);
3175 BUG_ON(proc->files);
3176
3177 hlist_del(&proc->proc_node);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003178
Martijn Coenen342e5c92017-02-03 14:40:46 -08003179 if (context->binder_context_mgr_node &&
3180 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003181 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01003182 "%s: %d context_mgr_node gone\n",
3183 __func__, proc->pid);
Martijn Coenen342e5c92017-02-03 14:40:46 -08003184 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003185 }
3186
3187 threads = 0;
3188 active_transactions = 0;
3189 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003190 struct binder_thread *thread;
3191
3192 thread = rb_entry(n, struct binder_thread, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003193 threads++;
3194 active_transactions += binder_free_thread(proc, thread);
3195 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003196
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003197 nodes = 0;
3198 incoming_refs = 0;
3199 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003200 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003201
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003202 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003203 nodes++;
3204 rb_erase(&node->rb_node, &proc->nodes);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003205 incoming_refs = binder_node_release(node, incoming_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003206 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003207
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003208 outgoing_refs = 0;
3209 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003210 struct binder_ref *ref;
3211
3212 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003213 outgoing_refs++;
3214 binder_delete_ref(ref);
3215 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003216
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003217 binder_release_work(&proc->todo);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003218 binder_release_work(&proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003219
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003220 buffers = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003221 while ((n = rb_first(&proc->allocated_buffers))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003222 struct binder_buffer *buffer;
3223
3224 buffer = rb_entry(n, struct binder_buffer, rb_node);
3225
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003226 t = buffer->transaction;
3227 if (t) {
3228 t->buffer = NULL;
3229 buffer->transaction = NULL;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303230 pr_err("release proc %d, transaction %d, not freed\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003231 proc->pid, t->debug_id);
3232 /*BUG();*/
3233 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003234
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003235 binder_free_buf(proc, buffer);
3236 buffers++;
3237 }
3238
3239 binder_stats_deleted(BINDER_STAT_PROC);
3240
3241 page_count = 0;
3242 if (proc->pages) {
3243 int i;
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003244
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003245 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
Mirsal Ennaimeba97bc52013-03-12 11:42:01 +01003246 void *page_addr;
3247
3248 if (!proc->pages[i])
3249 continue;
3250
3251 page_addr = proc->buffer + i * PAGE_SIZE;
3252 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01003253 "%s: %d: page %d at %p not freed\n",
3254 __func__, proc->pid, i, page_addr);
Mirsal Ennaimeba97bc52013-03-12 11:42:01 +01003255 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3256 __free_page(proc->pages[i]);
3257 page_count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003258 }
3259 kfree(proc->pages);
3260 vfree(proc->buffer);
3261 }
3262
3263 put_task_struct(proc->tsk);
3264
3265 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01003266 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3267 __func__, proc->pid, threads, nodes, incoming_refs,
3268 outgoing_refs, active_transactions, buffers, page_count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003269
3270 kfree(proc);
3271}
3272
/*
 * Workqueue handler draining binder_deferred_list.  Pops one proc at a
 * time, snapshots and clears its deferred_work bits under
 * binder_deferred_lock, then performs the requested actions under the
 * global binder lock.  put_files_struct() is deliberately called after
 * binder_unlock() to avoid doing it with the lock held.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			/* Steal the files pointer; dropped after unlock. */
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);	/* loop until the deferred list is empty */
}
/* Single work item shared by all deferred binder work. */
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3314
3315static void
3316binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3317{
3318 mutex_lock(&binder_deferred_lock);
3319 proc->deferred_work |= defer;
3320 if (hlist_unhashed(&proc->deferred_work_node)) {
3321 hlist_add_head(&proc->deferred_work_node,
3322 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05303323 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003324 }
3325 mutex_unlock(&binder_deferred_lock);
3326}
3327
/*
 * Emit one transaction line into the seq_file, prefixed by @prefix.
 * Shows source/target proc:thread pids (0 when absent), code, flags,
 * priority and reply flag, followed by buffer details when a buffer is
 * still attached.
 */
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}
3350
/* Emit one buffer line: id, data pointer, sizes, and whether a transaction
 * still owns it ("active") or it awaits a BC_FREE_BUFFER ("delivered"). */
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}
3359
/*
 * Emit one pending work item.  Transactions are delegated to
 * print_binder_transaction() with @transaction_prefix; other work types
 * print a one-line description prefixed by @prefix.
 */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
3395
/*
 * Emit the state of one thread: its looper flags, transaction stack and
 * pending work.  When @print_always is false and nothing beyond the header
 * was written, the seq_file count is rewound to start_pos so the empty
 * thread produces no output at all.
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	/* Walk the stack, following the link that matches this thread. */
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* Stack entry not linked to this thread: stop. */
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;	/* suppress empty thread output */
}
3428
/*
 * Emit one node line: user pointers, ref-state flags/counts, the number of
 * remote refs and the pids holding them, then any queued async work.
 */
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	/* First pass just counts the remote references. */
	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, "    ",
				  "    pending async transaction", w);
}
3454
/* Emit one reference line; "dead " is shown when the target node's owning
 * proc is gone. */
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
3461
/*
 * Emit the full state of one proc: threads, nodes (all nodes when
 * @print_all, otherwise only those with async work), refs (only when
 * @print_all), allocated buffers, pending work and delivered deaths.
 * Like print_binder_thread(), rewinds the seq_file when nothing beyond the
 * "proc %d" header was produced and @print_all is false.
 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	/* Only report whether any delivered death exists, not each one. */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;	/* suppress empty proc output */
}
3501
/* Names for BR_* return codes, indexed by command ordinal; the order must
 * match the protocol enum (size checked by BUILD_BUG_ON in
 * print_binder_stats()). */
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
3522
/* Names for BC_* command codes, indexed by command ordinal; the order must
 * match the protocol enum (size checked by BUILD_BUG_ON in
 * print_binder_stats()). */
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};
3542
/* Names for the object-statistics slots (BINDER_STAT_*), in slot order;
 * sizes checked by BUILD_BUG_ON in print_binder_stats(). */
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3552
/*
 * Emit non-zero counters from @stats: BC_* commands received, BR_* codes
 * returned, and per-object active/total creation counts.  BUILD_BUG_ONs
 * keep the name tables above in lockstep with the counter arrays.
 */
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] - stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}
3586
/*
 * Emit per-proc statistics: thread/node/ref/buffer counts, thread-pool
 * configuration, pending transaction count and the proc's own
 * binder_stats counters.
 */
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	/* Sum up ref counts alongside the number of refs. */
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  buffers: %d\n", count);

	/* Only BINDER_WORK_TRANSACTION items count as pending transactions. */
	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
3639
3640
/*
 * debugfs "state" file: dump dead nodes followed by the full state of every
 * proc.  The global lock is skipped when binder_debug_no_lock is set (a
 * debug knob for reading state from a wedged system).
 */
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3663
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003664static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003665{
3666 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003667 int do_lock = !binder_debug_no_lock;
3668
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003669 if (do_lock)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003670 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003671
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003672 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003673
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003674 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003675
Sasha Levinb67bfe02013-02-27 17:06:00 -08003676 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003677 print_binder_proc_stats(m, proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003678 if (do_lock)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003679 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003680 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003681}
3682
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003683static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003684{
3685 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003686 int do_lock = !binder_debug_no_lock;
3687
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003688 if (do_lock)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003689 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003690
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003691 seq_puts(m, "binder transactions:\n");
Sasha Levinb67bfe02013-02-27 17:06:00 -08003692 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003693 print_binder_proc(m, proc, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003694 if (do_lock)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003695 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003696 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003697}
3698
/*
 * debugfs per-proc file: dump one proc's full state.  The proc pointer in
 * m->private is validated against the live binder_procs list first, since
 * the proc may have been released after the debugfs file was opened.
 */
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	struct binder_proc *proc = m->private;
	int do_lock = !binder_debug_no_lock;
	bool valid_proc = false;

	if (do_lock)
		binder_lock(__func__);

	/* Only print if the proc is still on the global list. */
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr == proc) {
			valid_proc = true;
			break;
		}
	}
	if (valid_proc) {
		seq_puts(m, "binder proc state:\n");
		print_binder_proc(m, proc, 1);
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3723
/* Emit one transaction-log record; call_type 2 is a reply, 1 async, else a
 * synchronous call. */
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
		   e->target_handle, e->data_size, e->offsets_size);
}
3734
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003735static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003736{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003737 struct binder_transaction_log *log = m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003738 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003739
3740 if (log->full) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003741 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3742 print_binder_transaction_log_entry(m, &log->entry[i]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003743 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003744 for (i = 0; i < log->next; i++)
3745 print_binder_transaction_log_entry(m, &log->entry[i]);
3746 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003747}
3748
/* File operations for /dev/binder.  The same ioctl handler serves both
 * native and compat paths (.compat_ioctl points at binder_ioctl). */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
3759
/* Misc character device registration for /dev/binder (dynamic minor). */
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
3765
/* Instantiate the *_fops used by the debugfs files created in binder_init()
 * (BINDER_DEBUG_ENTRY is defined earlier in this file). */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
3770
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003771static int __init binder_init(void)
3772{
3773 int ret;
3774
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07003775 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3776 if (binder_debugfs_dir_entry_root)
3777 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3778 binder_debugfs_dir_entry_root);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003779 ret = misc_register(&binder_miscdev);
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07003780 if (binder_debugfs_dir_entry_root) {
3781 debugfs_create_file("state",
3782 S_IRUGO,
3783 binder_debugfs_dir_entry_root,
3784 NULL,
3785 &binder_state_fops);
3786 debugfs_create_file("stats",
3787 S_IRUGO,
3788 binder_debugfs_dir_entry_root,
3789 NULL,
3790 &binder_stats_fops);
3791 debugfs_create_file("transactions",
3792 S_IRUGO,
3793 binder_debugfs_dir_entry_root,
3794 NULL,
3795 &binder_transactions_fops);
3796 debugfs_create_file("transaction_log",
3797 S_IRUGO,
3798 binder_debugfs_dir_entry_root,
3799 &binder_transaction_log,
3800 &binder_transaction_log_fops);
3801 debugfs_create_file("failed_transaction_log",
3802 S_IRUGO,
3803 binder_debugfs_dir_entry_root,
3804 &binder_transaction_log_failed,
3805 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003806 }
3807 return ret;
3808}
3809
3810device_initcall(binder_init);
3811
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003812#define CREATE_TRACE_POINTS
3813#include "binder_trace.h"
3814
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003815MODULE_LICENSE("GPL v2");