/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static int binder_last_id;

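/*
 * BINDER_DEBUG_ENTRY(name) generates the debugfs boilerplate for a
 * seq_file show function: it defines binder_<name>_open() around
 * single_open() plus a matching binder_<name>_fops that can be handed
 * to debugfs_create_file().
 */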
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

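/*
 * Helpers to map a struct binder_object_header embedded in a
 * transaction buffer back to the enclosing object type.
 */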
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_REPLY_SG) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

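/*
 * Small fixed-size ring buffers recording recent transactions; a
 * second instance records only the transactions that failed.
 */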
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

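/*
 * A work item queued on a process, thread or node todo list; the type
 * field tells the consumer how to interpret the containing object.
 */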
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

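/*
 * A binder_node represents a binder object offered by the local
 * process @proc; remote processes reach it through binder_ref entries
 * chained on @refs. The strong/weak counters and has_*_ref flags
 * track the references held by the kernel and by userspace.
 */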
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

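/*
 * Per-process binder state: the rbtrees of threads, nodes and
 * references owned by the process, its todo/wait queue for incoming
 * work, thread-pool bookkeeping, and the binder_alloc instance that
 * manages its mmap()ed buffer space.
 */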
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

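/*
 * An in-flight transaction. from/to link the sender and target
 * thread/process, from_parent/to_parent chain nested transactions into
 * per-thread stacks, and buffer points at the data copied into the
 * target's binder_alloc space.
 */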
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

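/*
 * Adjust the current thread's nice value for a transaction; if the
 * requested value is not permitted by RLIMIT_NICE, clamp it to the
 * lowest allowed priority and log the capping.
 */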
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

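/*
 * Look up the binder_node for a userspace binder pointer in @proc's
 * nodes rbtree (keyed by node->ptr); returns NULL if no node exists.
 */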
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}


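/*
 * Look up a reference by its userspace descriptor in @proc's
 * refs_by_desc rbtree. With @need_strong_ref set, a ref holding only
 * weak counts is rejected so a weak handle cannot stand in for a
 * strong one.
 */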
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

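/*
 * Find the reference from @proc to @node, creating one if needed. A
 * new reference receives the lowest unused descriptor; descriptor 0 is
 * reserved for the context manager node.
 */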
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc,
			     node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}


static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

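/*
 * Pop a transaction off the target thread's transaction stack and free
 * it; the associated buffer is only detached here, not released.
 */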
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

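/*
 * Undo the side effects of the objects embedded in a transaction
 * buffer (node/ref counts, installed fds). @failed_at limits the walk
 * to the offsets that were processed before binder_transaction()
 * failed; when it is NULL the whole offset table is released.
 */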
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

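/*
 * Object translation for binder_transaction(): each helper rewrites a
 * flattened object from the sender's view into the form the receiving
 * process will see (local binder -> handle, handle -> local binder or
 * new handle, sender fd -> newly installed target fd), taking the
 * required node/ref references along the way.
 */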
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -EINVAL;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		struct binder_ref *new_ref;

		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -EINVAL;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}

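/*
 * Install a copy of @fd from the sending process into the target
 * process, provided the target accepts file descriptors and the
 * security hook allows the transfer; returns the new target fd or a
 * negative errno.
 */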
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			       binder_alloc_get_user_buffer_offset(
			       &target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}

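/*
 * Core of the driver: build a transaction (or reply) from the
 * binder_transaction_data passed in from userspace, copy its payload
 * and offset table into a buffer allocated from the target's
 * binder_alloc, and translate every embedded object before handing the
 * work to the target thread or process.
 */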
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1574 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301575 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1576 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001577 return_error = BR_FAILED_REPLY;
1578 goto err_copy_data_failed;
1579 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001580 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1581 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301582 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1583 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001584 return_error = BR_FAILED_REPLY;
1585 goto err_copy_data_failed;
1586 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001587 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1588 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1589 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001590 return_error = BR_FAILED_REPLY;
1591 goto err_bad_offset;
1592 }
Martijn Coenen79802402017-02-03 14:40:51 -08001593 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
1594 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
1595 proc->pid, thread->pid,
1596 (u64)extra_buffers_size);
1597 return_error = BR_FAILED_REPLY;
1598 goto err_bad_offset;
1599 }
1600 off_end = (void *)off_start + tr->offsets_size;
1601 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
1602 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001603 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001604 for (; offp < off_end; offp++) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001605 struct binder_object_header *hdr;
1606 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09001607
Martijn Coenenfeba3902017-02-03 14:40:45 -08001608 if (object_size == 0 || *offp < off_min) {
1609 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001610 proc->pid, thread->pid, (u64)*offp,
1611 (u64)off_min,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001612 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001613 return_error = BR_FAILED_REPLY;
1614 goto err_bad_offset;
1615 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001616
1617 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1618 off_min = *offp + object_size;
1619 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001620 case BINDER_TYPE_BINDER:
1621 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001622 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09001623
Martijn Coenenfeba3902017-02-03 14:40:45 -08001624 fp = to_flat_binder_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08001625 ret = binder_translate_binder(fp, t, thread);
1626 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02001627 return_error = BR_FAILED_REPLY;
Martijn Coenena056af42017-02-03 14:40:49 -08001628 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001629 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001630 } break;
1631 case BINDER_TYPE_HANDLE:
1632 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001633 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001634
Martijn Coenenfeba3902017-02-03 14:40:45 -08001635 fp = to_flat_binder_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08001636 ret = binder_translate_handle(fp, t, thread);
1637 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001638 return_error = BR_FAILED_REPLY;
Martijn Coenena056af42017-02-03 14:40:49 -08001639 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001640 }
1641 } break;
1642
1643 case BINDER_TYPE_FD: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001644 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08001645 int target_fd = binder_translate_fd(fp->fd, t, thread,
1646 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001647
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001648 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001649 return_error = BR_FAILED_REPLY;
Martijn Coenena056af42017-02-03 14:40:49 -08001650 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001651 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001652 fp->pad_binder = 0;
1653 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001654 } break;
Martijn Coenendef95c72017-02-03 14:40:52 -08001655 case BINDER_TYPE_FDA: {
1656 struct binder_fd_array_object *fda =
1657 to_binder_fd_array_object(hdr);
1658 struct binder_buffer_object *parent =
1659 binder_validate_ptr(t->buffer, fda->parent,
1660 off_start,
1661 offp - off_start);
1662 if (!parent) {
1663 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1664 proc->pid, thread->pid);
1665 return_error = BR_FAILED_REPLY;
1666 goto err_bad_parent;
1667 }
1668 if (!binder_validate_fixup(t->buffer, off_start,
1669 parent, fda->parent_offset,
1670 last_fixup_obj,
1671 last_fixup_min_off)) {
1672 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1673 proc->pid, thread->pid);
1674 return_error = BR_FAILED_REPLY;
1675 goto err_bad_parent;
1676 }
1677 ret = binder_translate_fd_array(fda, parent, t, thread,
1678 in_reply_to);
1679 if (ret < 0) {
1680 return_error = BR_FAILED_REPLY;
1681 goto err_translate_failed;
1682 }
1683 last_fixup_obj = parent;
1684 last_fixup_min_off =
1685 fda->parent_offset + sizeof(u32) * fda->num_fds;
1686 } break;
Martijn Coenen79802402017-02-03 14:40:51 -08001687 case BINDER_TYPE_PTR: {
1688 struct binder_buffer_object *bp =
1689 to_binder_buffer_object(hdr);
1690 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001691
Martijn Coenen79802402017-02-03 14:40:51 -08001692 if (bp->length > buf_left) {
1693 binder_user_error("%d:%d got transaction with too large buffer\n",
1694 proc->pid, thread->pid);
1695 return_error = BR_FAILED_REPLY;
1696 goto err_bad_offset;
1697 }
1698 if (copy_from_user(sg_bufp,
1699 (const void __user *)(uintptr_t)
1700 bp->buffer, bp->length)) {
1701 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1702 proc->pid, thread->pid);
1703 return_error = BR_FAILED_REPLY;
1704 goto err_copy_data_failed;
1705 }
1706 /* Fixup buffer pointer to target proc address space */
1707 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjos19c98722017-06-29 12:01:40 -07001708 binder_alloc_get_user_buffer_offset(
1709 &target_proc->alloc);
Martijn Coenen79802402017-02-03 14:40:51 -08001710 sg_bufp += ALIGN(bp->length, sizeof(u64));
1711
1712 ret = binder_fixup_parent(t, thread, bp, off_start,
1713 offp - off_start,
1714 last_fixup_obj,
1715 last_fixup_min_off);
1716 if (ret < 0) {
1717 return_error = BR_FAILED_REPLY;
1718 goto err_translate_failed;
1719 }
1720 last_fixup_obj = bp;
1721 last_fixup_min_off = 0;
1722 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001723 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001724 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001725 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001726 return_error = BR_FAILED_REPLY;
1727 goto err_bad_object_type;
1728 }
1729 }
1730 if (reply) {
1731 BUG_ON(t->buffer->async_transaction != 0);
1732 binder_pop_transaction(target_thread, in_reply_to);
1733 } else if (!(t->flags & TF_ONE_WAY)) {
1734 BUG_ON(t->buffer->async_transaction != 0);
1735 t->need_reply = 1;
1736 t->from_parent = thread->transaction_stack;
1737 thread->transaction_stack = t;
1738 } else {
1739 BUG_ON(target_node == NULL);
1740 BUG_ON(t->buffer->async_transaction != 1);
1741 if (target_node->has_async_transaction) {
1742 target_list = &target_node->async_todo;
1743 target_wait = NULL;
1744 } else
1745 target_node->has_async_transaction = 1;
1746 }
1747 t->work.type = BINDER_WORK_TRANSACTION;
1748 list_add_tail(&t->work.entry, target_list);
1749 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1750 list_add_tail(&tcomplete->entry, &thread->todo);
Riley Andrews00b40d62017-06-29 12:01:37 -07001751 if (target_wait) {
1752 if (reply || !(t->flags & TF_ONE_WAY))
1753 wake_up_interruptible_sync(target_wait);
1754 else
1755 wake_up_interruptible(target_wait);
1756 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001757 return;
1758
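/*
 * Descriptive note (added): the error labels below unwind in reverse order
 * of the setup above. Objects already translated are released together with
 * the transaction buffer, then the buffer, tcomplete and t are freed, and
 * the failure is finally reported to the calling thread via return_error
 * (or, for a failed reply, forwarded to the original sender with
 * binder_send_failed_reply()).
 */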
Martijn Coenena056af42017-02-03 14:40:49 -08001759err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001760err_bad_object_type:
1761err_bad_offset:
Martijn Coenendef95c72017-02-03 14:40:52 -08001762err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001763err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001764 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001765 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1766 t->buffer->transaction = NULL;
Todd Kjos19c98722017-06-29 12:01:40 -07001767 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001768err_binder_alloc_buf_failed:
1769 kfree(tcomplete);
1770 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1771err_alloc_tcomplete_failed:
1772 kfree(t);
1773 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1774err_alloc_t_failed:
1775err_bad_call_stack:
1776err_empty_call_stack:
1777err_dead_binder:
1778err_invalid_target_handle:
1779err_no_context_mgr_node:
1780 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001781 "%d:%d transaction failed %d, size %lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001782 proc->pid, thread->pid, return_error,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001783 (u64)tr->data_size, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001784
1785 {
1786 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09001787
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001788 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1789 *fe = *e;
1790 }
1791
1792 BUG_ON(thread->return_error != BR_OK);
1793 if (in_reply_to) {
1794 thread->return_error = BR_TRANSACTION_COMPLETE;
1795 binder_send_failed_reply(in_reply_to, return_error);
1796 } else
1797 thread->return_error = return_error;
1798}
1799
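/*
 * binder_thread_write - consume BC_* commands from the userspace write
 * buffer: reference count changes, BC_FREE_BUFFER, transactions and
 * replies, looper registration and death notifications. Processing stops
 * when the buffer is exhausted or thread->return_error is set, and
 * *consumed is updated so userspace can resume a partial write.
 */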
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02001800static int binder_thread_write(struct binder_proc *proc,
1801 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001802 binder_uintptr_t binder_buffer, size_t size,
1803 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001804{
1805 uint32_t cmd;
Martijn Coenen342e5c92017-02-03 14:40:46 -08001806 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001807 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001808 void __user *ptr = buffer + *consumed;
1809 void __user *end = buffer + size;
1810
1811 while (ptr < end && thread->return_error == BR_OK) {
1812 if (get_user(cmd, (uint32_t __user *)ptr))
1813 return -EFAULT;
1814 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001815 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001816 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1817 binder_stats.bc[_IOC_NR(cmd)]++;
1818 proc->stats.bc[_IOC_NR(cmd)]++;
1819 thread->stats.bc[_IOC_NR(cmd)]++;
1820 }
1821 switch (cmd) {
1822 case BC_INCREFS:
1823 case BC_ACQUIRE:
1824 case BC_RELEASE:
1825 case BC_DECREFS: {
1826 uint32_t target;
1827 struct binder_ref *ref;
1828 const char *debug_string;
1829
1830 if (get_user(target, (uint32_t __user *)ptr))
1831 return -EFAULT;
1832 ptr += sizeof(uint32_t);
Martijn Coenen342e5c92017-02-03 14:40:46 -08001833 if (target == 0 && context->binder_context_mgr_node &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001834 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1835 ref = binder_get_ref_for_node(proc,
Martijn Coenen342e5c92017-02-03 14:40:46 -08001836 context->binder_context_mgr_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001837 if (ref->desc != target) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301838 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001839 proc->pid, thread->pid,
1840 ref->desc);
1841 }
1842 } else
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001843 ref = binder_get_ref(proc, target,
1844 cmd == BC_ACQUIRE ||
1845 cmd == BC_RELEASE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001846 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301847 binder_user_error("%d:%d refcount change on invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001848 proc->pid, thread->pid, target);
1849 break;
1850 }
1851 switch (cmd) {
1852 case BC_INCREFS:
1853 debug_string = "IncRefs";
1854 binder_inc_ref(ref, 0, NULL);
1855 break;
1856 case BC_ACQUIRE:
1857 debug_string = "Acquire";
1858 binder_inc_ref(ref, 1, NULL);
1859 break;
1860 case BC_RELEASE:
1861 debug_string = "Release";
1862 binder_dec_ref(ref, 1);
1863 break;
1864 case BC_DECREFS:
1865 default:
1866 debug_string = "DecRefs";
1867 binder_dec_ref(ref, 0);
1868 break;
1869 }
1870 binder_debug(BINDER_DEBUG_USER_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301871 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001872 proc->pid, thread->pid, debug_string, ref->debug_id,
1873 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1874 break;
1875 }
1876 case BC_INCREFS_DONE:
1877 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001878 binder_uintptr_t node_ptr;
1879 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001880 struct binder_node *node;
1881
Arve Hjønnevågda498892014-02-21 14:40:26 -08001882 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001883 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001884 ptr += sizeof(binder_uintptr_t);
1885 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001886 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001887 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001888 node = binder_get_node(proc, node_ptr);
1889 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001890 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001891 proc->pid, thread->pid,
1892 cmd == BC_INCREFS_DONE ?
1893 "BC_INCREFS_DONE" :
1894 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08001895 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001896 break;
1897 }
1898 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001899 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001900 proc->pid, thread->pid,
1901 cmd == BC_INCREFS_DONE ?
1902 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08001903 (u64)node_ptr, node->debug_id,
1904 (u64)cookie, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001905 break;
1906 }
1907 if (cmd == BC_ACQUIRE_DONE) {
1908 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301909 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001910 proc->pid, thread->pid,
1911 node->debug_id);
1912 break;
1913 }
1914 node->pending_strong_ref = 0;
1915 } else {
1916 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301917 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001918 proc->pid, thread->pid,
1919 node->debug_id);
1920 break;
1921 }
1922 node->pending_weak_ref = 0;
1923 }
1924 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1925 binder_debug(BINDER_DEBUG_USER_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301926 "%d:%d %s node %d ls %d lw %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001927 proc->pid, thread->pid,
1928 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1929 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1930 break;
1931 }
1932 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05301933 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001934 return -EINVAL;
1935 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05301936 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001937 return -EINVAL;
1938
1939 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001940 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001941 struct binder_buffer *buffer;
1942
Arve Hjønnevågda498892014-02-21 14:40:26 -08001943 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001944 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001945 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001946
Todd Kjos19c98722017-06-29 12:01:40 -07001947 buffer = binder_alloc_buffer_lookup(&proc->alloc,
1948 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001949 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001950 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1951 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001952 break;
1953 }
1954 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001955 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1956 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001957 break;
1958 }
1959 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001960 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1961 proc->pid, thread->pid, (u64)data_ptr,
1962 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001963 buffer->transaction ? "active" : "finished");
1964
1965 if (buffer->transaction) {
1966 buffer->transaction->buffer = NULL;
1967 buffer->transaction = NULL;
1968 }
1969 if (buffer->async_transaction && buffer->target_node) {
1970 BUG_ON(!buffer->target_node->has_async_transaction);
1971 if (list_empty(&buffer->target_node->async_todo))
1972 buffer->target_node->has_async_transaction = 0;
1973 else
1974 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1975 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001976 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001977 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjos19c98722017-06-29 12:01:40 -07001978 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001979 break;
1980 }
1981
Martijn Coenen79802402017-02-03 14:40:51 -08001982 case BC_TRANSACTION_SG:
1983 case BC_REPLY_SG: {
1984 struct binder_transaction_data_sg tr;
1985
1986 if (copy_from_user(&tr, ptr, sizeof(tr)))
1987 return -EFAULT;
1988 ptr += sizeof(tr);
1989 binder_transaction(proc, thread, &tr.transaction_data,
1990 cmd == BC_REPLY_SG, tr.buffers_size);
1991 break;
1992 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001993 case BC_TRANSACTION:
1994 case BC_REPLY: {
1995 struct binder_transaction_data tr;
1996
1997 if (copy_from_user(&tr, ptr, sizeof(tr)))
1998 return -EFAULT;
1999 ptr += sizeof(tr);
Martijn Coenen4bfac802017-02-03 14:40:50 -08002000 binder_transaction(proc, thread, &tr,
2001 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002002 break;
2003 }
2004
2005 case BC_REGISTER_LOOPER:
2006 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302007 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002008 proc->pid, thread->pid);
2009 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2010 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302011 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002012 proc->pid, thread->pid);
2013 } else if (proc->requested_threads == 0) {
2014 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302015 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002016 proc->pid, thread->pid);
2017 } else {
2018 proc->requested_threads--;
2019 proc->requested_threads_started++;
2020 }
2021 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2022 break;
2023 case BC_ENTER_LOOPER:
2024 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302025 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002026 proc->pid, thread->pid);
2027 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2028 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302029 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002030 proc->pid, thread->pid);
2031 }
2032 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2033 break;
2034 case BC_EXIT_LOOPER:
2035 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302036 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002037 proc->pid, thread->pid);
2038 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2039 break;
2040
2041 case BC_REQUEST_DEATH_NOTIFICATION:
2042 case BC_CLEAR_DEATH_NOTIFICATION: {
2043 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002044 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002045 struct binder_ref *ref;
2046 struct binder_ref_death *death;
2047
2048 if (get_user(target, (uint32_t __user *)ptr))
2049 return -EFAULT;
2050 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002051 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002052 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002053 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002054 ref = binder_get_ref(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002055 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302056 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002057 proc->pid, thread->pid,
2058 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2059 "BC_REQUEST_DEATH_NOTIFICATION" :
2060 "BC_CLEAR_DEATH_NOTIFICATION",
2061 target);
2062 break;
2063 }
2064
2065 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002066 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002067 proc->pid, thread->pid,
2068 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2069 "BC_REQUEST_DEATH_NOTIFICATION" :
2070 "BC_CLEAR_DEATH_NOTIFICATION",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002071 (u64)cookie, ref->debug_id, ref->desc,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002072 ref->strong, ref->weak, ref->node->debug_id);
2073
2074 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2075 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302076 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002077 proc->pid, thread->pid);
2078 break;
2079 }
2080 death = kzalloc(sizeof(*death), GFP_KERNEL);
2081 if (death == NULL) {
2082 thread->return_error = BR_ERROR;
2083 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302084 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002085 proc->pid, thread->pid);
2086 break;
2087 }
2088 binder_stats_created(BINDER_STAT_DEATH);
2089 INIT_LIST_HEAD(&death->work.entry);
2090 death->cookie = cookie;
2091 ref->death = death;
2092 if (ref->node->proc == NULL) {
2093 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2094 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2095 list_add_tail(&ref->death->work.entry, &thread->todo);
2096 } else {
2097 list_add_tail(&ref->death->work.entry, &proc->todo);
2098 wake_up_interruptible(&proc->wait);
2099 }
2100 }
2101 } else {
2102 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302103 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002104 proc->pid, thread->pid);
2105 break;
2106 }
2107 death = ref->death;
2108 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002109 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002110 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002111 (u64)death->cookie,
2112 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002113 break;
2114 }
2115 ref->death = NULL;
2116 if (list_empty(&death->work.entry)) {
2117 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2118 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2119 list_add_tail(&death->work.entry, &thread->todo);
2120 } else {
2121 list_add_tail(&death->work.entry, &proc->todo);
2122 wake_up_interruptible(&proc->wait);
2123 }
2124 } else {
2125 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2126 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2127 }
2128 }
2129 } break;
2130 case BC_DEAD_BINDER_DONE: {
2131 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002132 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002133 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09002134
Arve Hjønnevågda498892014-02-21 14:40:26 -08002135 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002136 return -EFAULT;
2137
Lisa Du7a64cd82016-02-17 09:32:52 +08002138 ptr += sizeof(cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002139 list_for_each_entry(w, &proc->delivered_death, entry) {
2140 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
Seunghun Lee10f62862014-05-01 01:30:23 +09002141
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002142 if (tmp_death->cookie == cookie) {
2143 death = tmp_death;
2144 break;
2145 }
2146 }
2147 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002148 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2149 proc->pid, thread->pid, (u64)cookie,
2150 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002151 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002152 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2153 proc->pid, thread->pid, (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002154 break;
2155 }
2156
2157 list_del_init(&death->work.entry);
2158 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2159 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2160 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2161 list_add_tail(&death->work.entry, &thread->todo);
2162 } else {
2163 list_add_tail(&death->work.entry, &proc->todo);
2164 wake_up_interruptible(&proc->wait);
2165 }
2166 }
2167 } break;
2168
2169 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302170 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002171 proc->pid, thread->pid, cmd);
2172 return -EINVAL;
2173 }
2174 *consumed = ptr - buffer;
2175 }
2176 return 0;
2177}
2178
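/* Account a BR_* return code in the global, per-process and per-thread stats. */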
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02002179static void binder_stat_br(struct binder_proc *proc,
2180 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002181{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002182 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002183 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2184 binder_stats.br[_IOC_NR(cmd)]++;
2185 proc->stats.br[_IOC_NR(cmd)]++;
2186 thread->stats.br[_IOC_NR(cmd)]++;
2187 }
2188}
2189
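/*
 * Wait-queue predicates: pending work on the process or thread todo list,
 * a pending return error, or a request to return to userspace
 * (BINDER_LOOPER_STATE_NEED_RETURN) all make a waiter runnable.
 */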
2190static int binder_has_proc_work(struct binder_proc *proc,
2191 struct binder_thread *thread)
2192{
2193 return !list_empty(&proc->todo) ||
2194 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2195}
2196
2197static int binder_has_thread_work(struct binder_thread *thread)
2198{
2199 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2200 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2201}
2202
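/*
 * binder_thread_read - fill the userspace read buffer with BR_* work.
 * Any pending return_error is delivered first; otherwise the thread
 * sleeps (unless non_block) until work is queued on its own todo list
 * or, when it is idle, on the process todo list. Transactions are
 * copied out as BR_TRANSACTION/BR_REPLY, and BR_SPAWN_LOOPER is written
 * at the start of the buffer when the thread pool should grow.
 */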
2203static int binder_thread_read(struct binder_proc *proc,
2204 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002205 binder_uintptr_t binder_buffer, size_t size,
2206 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002207{
Arve Hjønnevågda498892014-02-21 14:40:26 -08002208 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002209 void __user *ptr = buffer + *consumed;
2210 void __user *end = buffer + size;
2211
2212 int ret = 0;
2213 int wait_for_proc_work;
2214
2215 if (*consumed == 0) {
2216 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2217 return -EFAULT;
2218 ptr += sizeof(uint32_t);
2219 }
2220
2221retry:
2222 wait_for_proc_work = thread->transaction_stack == NULL &&
2223 list_empty(&thread->todo);
2224
2225 if (thread->return_error != BR_OK && ptr < end) {
2226 if (thread->return_error2 != BR_OK) {
2227 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2228 return -EFAULT;
2229 ptr += sizeof(uint32_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002230 binder_stat_br(proc, thread, thread->return_error2);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002231 if (ptr == end)
2232 goto done;
2233 thread->return_error2 = BR_OK;
2234 }
2235 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2236 return -EFAULT;
2237 ptr += sizeof(uint32_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002238 binder_stat_br(proc, thread, thread->return_error);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002239 thread->return_error = BR_OK;
2240 goto done;
2241 }
2242
2243
2244 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2245 if (wait_for_proc_work)
2246 proc->ready_threads++;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002247
2248 binder_unlock(__func__);
2249
2250 trace_binder_wait_for_work(wait_for_proc_work,
2251 !!thread->transaction_stack,
2252 !list_empty(&thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002253 if (wait_for_proc_work) {
2254 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2255 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302256 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002257 proc->pid, thread->pid, thread->looper);
2258 wait_event_interruptible(binder_user_error_wait,
2259 binder_stop_on_user_error < 2);
2260 }
2261 binder_set_nice(proc->default_priority);
2262 if (non_block) {
2263 if (!binder_has_proc_work(proc, thread))
2264 ret = -EAGAIN;
2265 } else
Colin Crosse2610b22013-05-06 23:50:15 +00002266 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002267 } else {
2268 if (non_block) {
2269 if (!binder_has_thread_work(thread))
2270 ret = -EAGAIN;
2271 } else
Colin Crosse2610b22013-05-06 23:50:15 +00002272 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002273 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002274
2275 binder_lock(__func__);
2276
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002277 if (wait_for_proc_work)
2278 proc->ready_threads--;
2279 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2280
2281 if (ret)
2282 return ret;
2283
2284 while (1) {
2285 uint32_t cmd;
2286 struct binder_transaction_data tr;
2287 struct binder_work *w;
2288 struct binder_transaction *t = NULL;
2289
Dmitry Voytik395262a2014-09-08 18:16:34 +04002290 if (!list_empty(&thread->todo)) {
2291 w = list_first_entry(&thread->todo, struct binder_work,
2292 entry);
2293 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2294 w = list_first_entry(&proc->todo, struct binder_work,
2295 entry);
2296 } else {
2297 /* no data added */
2298 if (ptr - buffer == 4 &&
2299 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002300 goto retry;
2301 break;
2302 }
2303
2304 if (end - ptr < sizeof(tr) + 4)
2305 break;
2306
2307 switch (w->type) {
2308 case BINDER_WORK_TRANSACTION: {
2309 t = container_of(w, struct binder_transaction, work);
2310 } break;
2311 case BINDER_WORK_TRANSACTION_COMPLETE: {
2312 cmd = BR_TRANSACTION_COMPLETE;
2313 if (put_user(cmd, (uint32_t __user *)ptr))
2314 return -EFAULT;
2315 ptr += sizeof(uint32_t);
2316
2317 binder_stat_br(proc, thread, cmd);
2318 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302319 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002320 proc->pid, thread->pid);
2321
2322 list_del(&w->entry);
2323 kfree(w);
2324 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2325 } break;
2326 case BINDER_WORK_NODE: {
2327 struct binder_node *node = container_of(w, struct binder_node, work);
2328 uint32_t cmd = BR_NOOP;
2329 const char *cmd_name;
2330 int strong = node->internal_strong_refs || node->local_strong_refs;
2331 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
Seunghun Lee10f62862014-05-01 01:30:23 +09002332
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002333 if (weak && !node->has_weak_ref) {
2334 cmd = BR_INCREFS;
2335 cmd_name = "BR_INCREFS";
2336 node->has_weak_ref = 1;
2337 node->pending_weak_ref = 1;
2338 node->local_weak_refs++;
2339 } else if (strong && !node->has_strong_ref) {
2340 cmd = BR_ACQUIRE;
2341 cmd_name = "BR_ACQUIRE";
2342 node->has_strong_ref = 1;
2343 node->pending_strong_ref = 1;
2344 node->local_strong_refs++;
2345 } else if (!strong && node->has_strong_ref) {
2346 cmd = BR_RELEASE;
2347 cmd_name = "BR_RELEASE";
2348 node->has_strong_ref = 0;
2349 } else if (!weak && node->has_weak_ref) {
2350 cmd = BR_DECREFS;
2351 cmd_name = "BR_DECREFS";
2352 node->has_weak_ref = 0;
2353 }
2354 if (cmd != BR_NOOP) {
2355 if (put_user(cmd, (uint32_t __user *)ptr))
2356 return -EFAULT;
2357 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002358 if (put_user(node->ptr,
2359 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002360 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002361 ptr += sizeof(binder_uintptr_t);
2362 if (put_user(node->cookie,
2363 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002364 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002365 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002366
2367 binder_stat_br(proc, thread, cmd);
2368 binder_debug(BINDER_DEBUG_USER_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002369 "%d:%d %s %d u%016llx c%016llx\n",
2370 proc->pid, thread->pid, cmd_name,
2371 node->debug_id,
2372 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002373 } else {
2374 list_del_init(&w->entry);
2375 if (!weak && !strong) {
2376 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002377 "%d:%d node %d u%016llx c%016llx deleted\n",
2378 proc->pid, thread->pid,
2379 node->debug_id,
2380 (u64)node->ptr,
2381 (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002382 rb_erase(&node->rb_node, &proc->nodes);
2383 kfree(node);
2384 binder_stats_deleted(BINDER_STAT_NODE);
2385 } else {
2386 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002387 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2388 proc->pid, thread->pid,
2389 node->debug_id,
2390 (u64)node->ptr,
2391 (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002392 }
2393 }
2394 } break;
2395 case BINDER_WORK_DEAD_BINDER:
2396 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2397 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2398 struct binder_ref_death *death;
2399 uint32_t cmd;
2400
2401 death = container_of(w, struct binder_ref_death, work);
2402 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2403 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2404 else
2405 cmd = BR_DEAD_BINDER;
2406 if (put_user(cmd, (uint32_t __user *)ptr))
2407 return -EFAULT;
2408 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002409 if (put_user(death->cookie,
2410 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002411 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002412 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002413 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002414 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002415 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002416 proc->pid, thread->pid,
2417 cmd == BR_DEAD_BINDER ?
2418 "BR_DEAD_BINDER" :
2419 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002420 (u64)death->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002421
2422 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2423 list_del(&w->entry);
2424 kfree(death);
2425 binder_stats_deleted(BINDER_STAT_DEATH);
2426 } else
2427 list_move(&w->entry, &proc->delivered_death);
2428 if (cmd == BR_DEAD_BINDER)
2429 goto done; /* DEAD_BINDER notifications can cause transactions */
2430 } break;
2431 }
2432
2433 if (!t)
2434 continue;
2435
2436 BUG_ON(t->buffer == NULL);
2437 if (t->buffer->target_node) {
2438 struct binder_node *target_node = t->buffer->target_node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002439
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002440 tr.target.ptr = target_node->ptr;
2441 tr.cookie = target_node->cookie;
2442 t->saved_priority = task_nice(current);
2443 if (t->priority < target_node->min_priority &&
2444 !(t->flags & TF_ONE_WAY))
2445 binder_set_nice(t->priority);
2446 else if (!(t->flags & TF_ONE_WAY) ||
2447 t->saved_priority > target_node->min_priority)
2448 binder_set_nice(target_node->min_priority);
2449 cmd = BR_TRANSACTION;
2450 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002451 tr.target.ptr = 0;
2452 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002453 cmd = BR_REPLY;
2454 }
2455 tr.code = t->code;
2456 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06002457 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002458
2459 if (t->from) {
2460 struct task_struct *sender = t->from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09002461
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002462 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08002463 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002464 } else {
2465 tr.sender_pid = 0;
2466 }
2467
2468 tr.data_size = t->buffer->data_size;
2469 tr.offsets_size = t->buffer->offsets_size;
Todd Kjos19c98722017-06-29 12:01:40 -07002470 tr.data.ptr.buffer = (binder_uintptr_t)
2471 ((uintptr_t)t->buffer->data +
2472 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002473 tr.data.ptr.offsets = tr.data.ptr.buffer +
2474 ALIGN(t->buffer->data_size,
2475 sizeof(void *));
2476
2477 if (put_user(cmd, (uint32_t __user *)ptr))
2478 return -EFAULT;
2479 ptr += sizeof(uint32_t);
2480 if (copy_to_user(ptr, &tr, sizeof(tr)))
2481 return -EFAULT;
2482 ptr += sizeof(tr);
2483
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002484 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002485 binder_stat_br(proc, thread, cmd);
2486 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002487 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002488 proc->pid, thread->pid,
2489 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2490 "BR_REPLY",
2491 t->debug_id, t->from ? t->from->proc->pid : 0,
2492 t->from ? t->from->pid : 0, cmd,
2493 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002494 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002495
2496 list_del(&t->work.entry);
2497 t->buffer->allow_user_free = 1;
2498 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2499 t->to_parent = thread->transaction_stack;
2500 t->to_thread = thread;
2501 thread->transaction_stack = t;
2502 } else {
2503 t->buffer->transaction = NULL;
2504 kfree(t);
2505 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2506 }
2507 break;
2508 }
2509
2510done:
2511
2512 *consumed = ptr - buffer;
2513 if (proc->requested_threads + proc->ready_threads == 0 &&
2514 proc->requested_threads_started < proc->max_threads &&
2515 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2516 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
 2517 /* spawn a new thread if we leave this out */) {
2518 proc->requested_threads++;
2519 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302520 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002521 proc->pid, thread->pid);
2522 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2523 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002524 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002525 }
2526 return 0;
2527}
2528
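/*
 * binder_release_work - drain a todo list that can no longer be served.
 * Transactions that still expect a reply get BR_DEAD_REPLY sent back to
 * the sender; completed/one-way transactions and death notifications are
 * freed, and unexpected work types are only logged.
 */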
2529static void binder_release_work(struct list_head *list)
2530{
2531 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09002532
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002533 while (!list_empty(list)) {
2534 w = list_first_entry(list, struct binder_work, entry);
2535 list_del_init(&w->entry);
2536 switch (w->type) {
2537 case BINDER_WORK_TRANSACTION: {
2538 struct binder_transaction *t;
2539
2540 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002541 if (t->buffer->target_node &&
2542 !(t->flags & TF_ONE_WAY)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002543 binder_send_failed_reply(t, BR_DEAD_REPLY);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002544 } else {
2545 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302546 "undelivered transaction %d\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002547 t->debug_id);
2548 t->buffer->transaction = NULL;
2549 kfree(t);
2550 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2551 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002552 } break;
2553 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002554 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302555 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002556 kfree(w);
2557 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2558 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002559 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2560 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2561 struct binder_ref_death *death;
2562
2563 death = container_of(w, struct binder_ref_death, work);
2564 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002565 "undelivered death notification, %016llx\n",
2566 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002567 kfree(death);
2568 binder_stats_deleted(BINDER_STAT_DEATH);
2569 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002570 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302571 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002572 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002573 break;
2574 }
2575 }
2576
2577}
2578
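/*
 * binder_get_thread - look up the calling task in proc->threads (an rbtree
 * keyed by pid) and allocate a new binder_thread on first use.
 */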
2579static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2580{
2581 struct binder_thread *thread = NULL;
2582 struct rb_node *parent = NULL;
2583 struct rb_node **p = &proc->threads.rb_node;
2584
2585 while (*p) {
2586 parent = *p;
2587 thread = rb_entry(parent, struct binder_thread, rb_node);
2588
2589 if (current->pid < thread->pid)
2590 p = &(*p)->rb_left;
2591 else if (current->pid > thread->pid)
2592 p = &(*p)->rb_right;
2593 else
2594 break;
2595 }
2596 if (*p == NULL) {
2597 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2598 if (thread == NULL)
2599 return NULL;
2600 binder_stats_created(BINDER_STAT_THREAD);
2601 thread->proc = proc;
2602 thread->pid = current->pid;
2603 init_waitqueue_head(&thread->wait);
2604 INIT_LIST_HEAD(&thread->todo);
2605 rb_link_node(&thread->rb_node, parent, p);
2606 rb_insert_color(&thread->rb_node, &proc->threads);
2607 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2608 thread->return_error = BR_OK;
2609 thread->return_error2 = BR_OK;
2610 }
2611 return thread;
2612}
2613
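/*
 * binder_free_thread - detach an exiting thread from any transactions still
 * on its stack (sending BR_DEAD_REPLY for a reply it owed), release its
 * undelivered work and free it. Returns the number of transactions that
 * were still active.
 */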
2614static int binder_free_thread(struct binder_proc *proc,
2615 struct binder_thread *thread)
2616{
2617 struct binder_transaction *t;
2618 struct binder_transaction *send_reply = NULL;
2619 int active_transactions = 0;
2620
2621 rb_erase(&thread->rb_node, &proc->threads);
2622 t = thread->transaction_stack;
2623 if (t && t->to_thread == thread)
2624 send_reply = t;
2625 while (t) {
2626 active_transactions++;
2627 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302628 "release %d:%d transaction %d %s, still active\n",
2629 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002630 t->debug_id,
2631 (t->to_thread == thread) ? "in" : "out");
2632
2633 if (t->to_thread == thread) {
2634 t->to_proc = NULL;
2635 t->to_thread = NULL;
2636 if (t->buffer) {
2637 t->buffer->transaction = NULL;
2638 t->buffer = NULL;
2639 }
2640 t = t->to_parent;
2641 } else if (t->from == thread) {
2642 t->from = NULL;
2643 t = t->from_parent;
2644 } else
2645 BUG();
2646 }
2647 if (send_reply)
2648 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2649 binder_release_work(&thread->todo);
2650 kfree(thread);
2651 binder_stats_deleted(BINDER_STAT_THREAD);
2652 return active_transactions;
2653}
2654
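/*
 * binder_poll - report POLLIN when work is queued for the calling thread,
 * or for the whole process if the thread is idle (no transaction stack and
 * an empty todo list).
 */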
2655static unsigned int binder_poll(struct file *filp,
2656 struct poll_table_struct *wait)
2657{
2658 struct binder_proc *proc = filp->private_data;
2659 struct binder_thread *thread = NULL;
2660 int wait_for_proc_work;
2661
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002662 binder_lock(__func__);
2663
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002664 thread = binder_get_thread(proc);
2665
2666 wait_for_proc_work = thread->transaction_stack == NULL &&
2667 list_empty(&thread->todo) && thread->return_error == BR_OK;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002668
2669 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002670
2671 if (wait_for_proc_work) {
2672 if (binder_has_proc_work(proc, thread))
2673 return POLLIN;
2674 poll_wait(filp, &proc->wait, wait);
2675 if (binder_has_proc_work(proc, thread))
2676 return POLLIN;
2677 } else {
2678 if (binder_has_thread_work(thread))
2679 return POLLIN;
2680 poll_wait(filp, &thread->wait, wait);
2681 if (binder_has_thread_work(thread))
2682 return POLLIN;
2683 }
2684 return 0;
2685}
2686
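/*
 * binder_ioctl_write_read - handle BINDER_WRITE_READ: copy in the
 * binder_write_read block, feed the write buffer to binder_thread_write()
 * and then fill the read buffer via binder_thread_read(); the consumed
 * counts are copied back to userspace even when either half fails.
 */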
Tair Rzayev78260ac2014-06-03 22:27:21 +03002687static int binder_ioctl_write_read(struct file *filp,
2688 unsigned int cmd, unsigned long arg,
2689 struct binder_thread *thread)
2690{
2691 int ret = 0;
2692 struct binder_proc *proc = filp->private_data;
2693 unsigned int size = _IOC_SIZE(cmd);
2694 void __user *ubuf = (void __user *)arg;
2695 struct binder_write_read bwr;
2696
2697 if (size != sizeof(struct binder_write_read)) {
2698 ret = -EINVAL;
2699 goto out;
2700 }
2701 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2702 ret = -EFAULT;
2703 goto out;
2704 }
2705 binder_debug(BINDER_DEBUG_READ_WRITE,
2706 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2707 proc->pid, thread->pid,
2708 (u64)bwr.write_size, (u64)bwr.write_buffer,
2709 (u64)bwr.read_size, (u64)bwr.read_buffer);
2710
2711 if (bwr.write_size > 0) {
2712 ret = binder_thread_write(proc, thread,
2713 bwr.write_buffer,
2714 bwr.write_size,
2715 &bwr.write_consumed);
2716 trace_binder_write_done(ret);
2717 if (ret < 0) {
2718 bwr.read_consumed = 0;
2719 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2720 ret = -EFAULT;
2721 goto out;
2722 }
2723 }
2724 if (bwr.read_size > 0) {
2725 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2726 bwr.read_size,
2727 &bwr.read_consumed,
2728 filp->f_flags & O_NONBLOCK);
2729 trace_binder_read_done(ret);
2730 if (!list_empty(&proc->todo))
2731 wake_up_interruptible(&proc->wait);
2732 if (ret < 0) {
2733 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2734 ret = -EFAULT;
2735 goto out;
2736 }
2737 }
2738 binder_debug(BINDER_DEBUG_READ_WRITE,
2739 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2740 proc->pid, thread->pid,
2741 (u64)bwr.write_consumed, (u64)bwr.write_size,
2742 (u64)bwr.read_consumed, (u64)bwr.read_size);
2743 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2744 ret = -EFAULT;
2745 goto out;
2746 }
2747out:
2748 return ret;
2749}
2750
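/*
 * binder_ioctl_set_ctx_mgr - handle BINDER_SET_CONTEXT_MGR: after the
 * security check, enforce a single registration with a consistent euid and
 * create the context manager node with initial strong and weak references.
 */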
2751static int binder_ioctl_set_ctx_mgr(struct file *filp)
2752{
2753 int ret = 0;
2754 struct binder_proc *proc = filp->private_data;
Martijn Coenen342e5c92017-02-03 14:40:46 -08002755 struct binder_context *context = proc->context;
2756
Tair Rzayev78260ac2014-06-03 22:27:21 +03002757 kuid_t curr_euid = current_euid();
2758
Martijn Coenen342e5c92017-02-03 14:40:46 -08002759 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03002760 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2761 ret = -EBUSY;
2762 goto out;
2763 }
Stephen Smalley79af7302015-01-21 10:54:10 -05002764 ret = security_binder_set_context_mgr(proc->tsk);
2765 if (ret < 0)
2766 goto out;
Martijn Coenen342e5c92017-02-03 14:40:46 -08002767 if (uid_valid(context->binder_context_mgr_uid)) {
2768 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03002769 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2770 from_kuid(&init_user_ns, curr_euid),
2771 from_kuid(&init_user_ns,
Martijn Coenen342e5c92017-02-03 14:40:46 -08002772 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03002773 ret = -EPERM;
2774 goto out;
2775 }
2776 } else {
Martijn Coenen342e5c92017-02-03 14:40:46 -08002777 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03002778 }
Martijn Coenen342e5c92017-02-03 14:40:46 -08002779 context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
2780 if (!context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03002781 ret = -ENOMEM;
2782 goto out;
2783 }
Martijn Coenen342e5c92017-02-03 14:40:46 -08002784 context->binder_context_mgr_node->local_weak_refs++;
2785 context->binder_context_mgr_node->local_strong_refs++;
2786 context->binder_context_mgr_node->has_strong_ref = 1;
2787 context->binder_context_mgr_node->has_weak_ref = 1;
Tair Rzayev78260ac2014-06-03 22:27:21 +03002788out:
2789 return ret;
2790}
2791
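/*
 * binder_ioctl - top-level ioctl dispatcher: takes the global binder lock,
 * finds or creates the calling binder_thread and handles BINDER_WRITE_READ,
 * BINDER_SET_MAX_THREADS, BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT and
 * BINDER_VERSION.
 */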
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002792static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2793{
2794 int ret;
2795 struct binder_proc *proc = filp->private_data;
2796 struct binder_thread *thread;
2797 unsigned int size = _IOC_SIZE(cmd);
2798 void __user *ubuf = (void __user *)arg;
2799
Tair Rzayev78260ac2014-06-03 22:27:21 +03002800 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2801 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002802
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002803 trace_binder_ioctl(cmd, arg);
2804
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002805 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2806 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002807 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002808
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002809 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002810 thread = binder_get_thread(proc);
2811 if (thread == NULL) {
2812 ret = -ENOMEM;
2813 goto err;
2814 }
2815
2816 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03002817 case BINDER_WRITE_READ:
2818 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2819 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002820 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002821 break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002822 case BINDER_SET_MAX_THREADS:
2823 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2824 ret = -EINVAL;
2825 goto err;
2826 }
2827 break;
2828 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03002829 ret = binder_ioctl_set_ctx_mgr(filp);
2830 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002831 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002832 break;
2833 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302834 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002835 proc->pid, thread->pid);
2836 binder_free_thread(proc, thread);
2837 thread = NULL;
2838 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02002839 case BINDER_VERSION: {
2840 struct binder_version __user *ver = ubuf;
2841
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002842 if (size != sizeof(struct binder_version)) {
2843 ret = -EINVAL;
2844 goto err;
2845 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02002846 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2847 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002848 ret = -EINVAL;
2849 goto err;
2850 }
2851 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02002852 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002853 default:
2854 ret = -EINVAL;
2855 goto err;
2856 }
2857 ret = 0;
2858err:
2859 if (thread)
2860 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002861 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002862 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2863 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05302864 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002865err_unlocked:
2866 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002867 return ret;
2868}
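/*
 * Illustrative userspace sketch of the simpler ioctls dispatched above
 * (binder_fd is a hypothetical descriptor, error handling elided; the
 * thread-pool size of 15 mirrors what libbinder is assumed to request):
 *
 *	struct binder_version vers;
 *	__u32 max_threads = 15;
 *
 *	ioctl(binder_fd, BINDER_VERSION, &vers);
 *	if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -EINVAL;
 *	ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
 */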
2869
2870static void binder_vma_open(struct vm_area_struct *vma)
2871{
2872 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09002873
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002874 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302875 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002876 proc->pid, vma->vm_start, vma->vm_end,
2877 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2878 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002879}
2880
2881static void binder_vma_close(struct vm_area_struct *vma)
2882{
2883 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09002884
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002885 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302886 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002887 proc->pid, vma->vm_start, vma->vm_end,
2888 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2889 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjos19c98722017-06-29 12:01:40 -07002890 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002891 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2892}
2893
Dave Jiang11bac802017-02-24 14:56:41 -08002894static int binder_vm_fault(struct vm_fault *vmf)
Vinayak Menonddac7d52014-06-02 18:17:59 +05302895{
2896 return VM_FAULT_SIGBUS;
2897}
2898
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07002899static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002900 .open = binder_vma_open,
2901 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05302902 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002903};
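/*
 * binder_vm_fault() always reports SIGBUS: buffer pages are never
 * demand-faulted by userspace. They are assumed to be inserted
 * explicitly by the allocator (binder_alloc) when transaction buffers
 * are set up, so any stray fault in the mapping indicates a bug or a
 * misbehaving client.
 */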
2904
Todd Kjos19c98722017-06-29 12:01:40 -07002905static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2906{
2907 int ret;
2908 struct binder_proc *proc = filp->private_data;
2909 const char *failure_string;
2910
2911 if (proc->tsk != current->group_leader)
2912 return -EINVAL;
2913
2914 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2915 vma->vm_end = vma->vm_start + SZ_4M;
2916
2917 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2918 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2919 __func__, proc->pid, vma->vm_start, vma->vm_end,
2920 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2921 (unsigned long)pgprot_val(vma->vm_page_prot));
2922
2923 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2924 ret = -EPERM;
2925 failure_string = "bad vm_flags";
2926 goto err_bad_arg;
2927 }
2928 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2929 vma->vm_ops = &binder_vm_ops;
2930 vma->vm_private_data = proc;
2931
2932 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
2933 if (ret)
2934 return ret;
2935 proc->files = get_files_struct(current);
2936 return 0;
2937
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002938err_bad_arg:
Sherwin Soltani258767f2012-06-26 02:00:30 -04002939 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002940 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2941 return ret;
2942}
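/*
 * Illustrative userspace sketch: a process maps the device read-only
 * once; binder_mmap() above rejects writable mappings and silently caps
 * the region at 4MB. The 1MB size and flag set below mirror what
 * libbinder is assumed to use:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */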
2943
2944static int binder_open(struct inode *nodp, struct file *filp)
2945{
2946 struct binder_proc *proc;
Martijn Coenenac4812c2017-02-03 14:40:48 -08002947 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002948
2949 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2950 current->group_leader->pid, current->pid);
2951
2952 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2953 if (proc == NULL)
2954 return -ENOMEM;
Todd Kjosc4ea41b2017-06-29 12:01:36 -07002955 get_task_struct(current->group_leader);
2956 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002957 INIT_LIST_HEAD(&proc->todo);
2958 init_waitqueue_head(&proc->wait);
2959 proc->default_priority = task_nice(current);
Martijn Coenenac4812c2017-02-03 14:40:48 -08002960 binder_dev = container_of(filp->private_data, struct binder_device,
2961 miscdev);
2962 proc->context = &binder_dev->context;
Todd Kjos19c98722017-06-29 12:01:40 -07002963 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002964
2965 binder_lock(__func__);
2966
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002967 binder_stats_created(BINDER_STAT_PROC);
2968 hlist_add_head(&proc->proc_node, &binder_procs);
2969 proc->pid = current->group_leader->pid;
2970 INIT_LIST_HEAD(&proc->delivered_death);
2971 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002972
2973 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002974
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07002975 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002976 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09002977
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002978 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08002979 /*
2980 * proc debug entries are shared between contexts, so
2981 * this will fail if the process tries to open the driver
2982 * again with a different context. The printing code will
2983 * anyway print all contexts that a given PID has, so this
2984 * is not a problem.
2985 */
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07002986 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
Martijn Coenen14db3182017-02-03 14:40:47 -08002987 binder_debugfs_dir_entry_proc,
2988 (void *)(unsigned long)proc->pid,
2989 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002990 }
2991
2992 return 0;
2993}
2994
2995static int binder_flush(struct file *filp, fl_owner_t id)
2996{
2997 struct binder_proc *proc = filp->private_data;
2998
2999 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3000
3001 return 0;
3002}
3003
3004static void binder_deferred_flush(struct binder_proc *proc)
3005{
3006 struct rb_node *n;
3007 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09003008
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003009 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3010 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09003011
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003012 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3013 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3014 wake_up_interruptible(&thread->wait);
3015 wake_count++;
3016 }
3017 }
3018 wake_up_interruptible_all(&proc->wait);
3019
3020 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3021 "binder_flush: %d woke %d threads\n", proc->pid,
3022 wake_count);
3023}
3024
3025static int binder_release(struct inode *nodp, struct file *filp)
3026{
3027 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09003028
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07003029 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003030 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3031
3032 return 0;
3033}
3034
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003035static int binder_node_release(struct binder_node *node, int refs)
3036{
3037 struct binder_ref *ref;
3038 int death = 0;
3039
3040 list_del_init(&node->work.entry);
3041 binder_release_work(&node->async_todo);
3042
3043 if (hlist_empty(&node->refs)) {
3044 kfree(node);
3045 binder_stats_deleted(BINDER_STAT_NODE);
3046
3047 return refs;
3048 }
3049
3050 node->proc = NULL;
3051 node->local_strong_refs = 0;
3052 node->local_weak_refs = 0;
3053 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3054
3055 hlist_for_each_entry(ref, &node->refs, node_entry) {
3056 refs++;
3057
3058 if (!ref->death)
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08003059 continue;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003060
3061 death++;
3062
3063 if (list_empty(&ref->death->work.entry)) {
3064 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3065 list_add_tail(&ref->death->work.entry,
3066 &ref->proc->todo);
3067 wake_up_interruptible(&ref->proc->wait);
3068 } else
3069 BUG();
3070 }
3071
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003072 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3073 "node %d now dead, refs %d, death %d\n",
3074 node->debug_id, refs, death);
3075
3076 return refs;
3077}
3078
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003079static void binder_deferred_release(struct binder_proc *proc)
3080{
Martijn Coenen342e5c92017-02-03 14:40:46 -08003081 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003082 struct rb_node *n;
Todd Kjos19c98722017-06-29 12:01:40 -07003083 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003084
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003085 BUG_ON(proc->files);
3086
3087 hlist_del(&proc->proc_node);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003088
Martijn Coenen342e5c92017-02-03 14:40:46 -08003089 if (context->binder_context_mgr_node &&
3090 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003091 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01003092 "%s: %d context_mgr_node gone\n",
3093 __func__, proc->pid);
Martijn Coenen342e5c92017-02-03 14:40:46 -08003094 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003095 }
3096
3097 threads = 0;
3098 active_transactions = 0;
3099 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003100 struct binder_thread *thread;
3101
3102 thread = rb_entry(n, struct binder_thread, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003103 threads++;
3104 active_transactions += binder_free_thread(proc, thread);
3105 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003106
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003107 nodes = 0;
3108 incoming_refs = 0;
3109 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003110 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003111
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003112 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003113 nodes++;
3114 rb_erase(&node->rb_node, &proc->nodes);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01003115 incoming_refs = binder_node_release(node, incoming_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003116 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003117
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003118 outgoing_refs = 0;
3119 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003120 struct binder_ref *ref;
3121
3122 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003123 outgoing_refs++;
3124 binder_delete_ref(ref);
3125 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01003126
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003127 binder_release_work(&proc->todo);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003128 binder_release_work(&proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003129
Todd Kjos19c98722017-06-29 12:01:40 -07003130 binder_alloc_deferred_release(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003131 binder_stats_deleted(BINDER_STAT_PROC);
3132
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003133 put_task_struct(proc->tsk);
3134
3135 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjos19c98722017-06-29 12:01:40 -07003136 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01003137 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjos19c98722017-06-29 12:01:40 -07003138 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003139
3140 kfree(proc);
3141}
3142
3143static void binder_deferred_func(struct work_struct *work)
3144{
3145 struct binder_proc *proc;
3146 struct files_struct *files;
3147
3148 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09003149
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003150 do {
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003151 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003152 mutex_lock(&binder_deferred_lock);
3153 if (!hlist_empty(&binder_deferred_list)) {
3154 proc = hlist_entry(binder_deferred_list.first,
3155 struct binder_proc, deferred_work_node);
3156 hlist_del_init(&proc->deferred_work_node);
3157 defer = proc->deferred_work;
3158 proc->deferred_work = 0;
3159 } else {
3160 proc = NULL;
3161 defer = 0;
3162 }
3163 mutex_unlock(&binder_deferred_lock);
3164
3165 files = NULL;
3166 if (defer & BINDER_DEFERRED_PUT_FILES) {
3167 files = proc->files;
3168 if (files)
3169 proc->files = NULL;
3170 }
3171
3172 if (defer & BINDER_DEFERRED_FLUSH)
3173 binder_deferred_flush(proc);
3174
3175 if (defer & BINDER_DEFERRED_RELEASE)
3176 binder_deferred_release(proc); /* frees proc */
3177
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003178 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003179 if (files)
3180 put_files_struct(files);
3181 } while (proc);
3182}
3183static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3184
3185static void
3186binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3187{
3188 mutex_lock(&binder_deferred_lock);
3189 proc->deferred_work |= defer;
3190 if (hlist_unhashed(&proc->deferred_work_node)) {
3191 hlist_add_head(&proc->deferred_work_node,
3192 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05303193 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003194 }
3195 mutex_unlock(&binder_deferred_lock);
3196}
3197
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003198static void print_binder_transaction(struct seq_file *m, const char *prefix,
3199 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003200{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003201 seq_printf(m,
3202 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3203 prefix, t->debug_id, t,
3204 t->from ? t->from->proc->pid : 0,
3205 t->from ? t->from->pid : 0,
3206 t->to_proc ? t->to_proc->pid : 0,
3207 t->to_thread ? t->to_thread->pid : 0,
3208 t->code, t->flags, t->priority, t->need_reply);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003209 if (t->buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003210 seq_puts(m, " buffer free\n");
3211 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003212 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003213 if (t->buffer->target_node)
3214 seq_printf(m, " node %d",
3215 t->buffer->target_node->debug_id);
3216 seq_printf(m, " size %zd:%zd data %p\n",
3217 t->buffer->data_size, t->buffer->offsets_size,
3218 t->buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003219}
3220
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003221static void print_binder_work(struct seq_file *m, const char *prefix,
3222 const char *transaction_prefix,
3223 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003224{
3225 struct binder_node *node;
3226 struct binder_transaction *t;
3227
3228 switch (w->type) {
3229 case BINDER_WORK_TRANSACTION:
3230 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003231 print_binder_transaction(m, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003232 break;
3233 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003234 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003235 break;
3236 case BINDER_WORK_NODE:
3237 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003238 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3239 prefix, node->debug_id,
3240 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003241 break;
3242 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003243 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003244 break;
3245 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003246 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003247 break;
3248 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003249 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003250 break;
3251 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003252 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003253 break;
3254 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003255}
3256
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003257static void print_binder_thread(struct seq_file *m,
3258 struct binder_thread *thread,
3259 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003260{
3261 struct binder_transaction *t;
3262 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003263 size_t start_pos = m->count;
3264 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003265
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003266 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3267 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003268 t = thread->transaction_stack;
3269 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003270 if (t->from == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003271 print_binder_transaction(m,
3272 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003273 t = t->from_parent;
3274 } else if (t->to_thread == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003275 print_binder_transaction(m,
3276 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003277 t = t->to_parent;
3278 } else {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003279 print_binder_transaction(m, " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003280 t = NULL;
3281 }
3282 }
3283 list_for_each_entry(w, &thread->todo, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003284 print_binder_work(m, " ", " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003285 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003286 if (!print_always && m->count == header_pos)
3287 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003288}
3289
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003290static void print_binder_node(struct seq_file *m, struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003291{
3292 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003293 struct binder_work *w;
3294 int count;
3295
3296 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08003297 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003298 count++;
3299
Arve Hjønnevågda498892014-02-21 14:40:26 -08003300 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3301 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003302 node->has_strong_ref, node->has_weak_ref,
3303 node->local_strong_refs, node->local_weak_refs,
3304 node->internal_strong_refs, count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003305 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003306 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08003307 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003308 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003309 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003310 seq_puts(m, "\n");
3311 list_for_each_entry(w, &node->async_todo, entry)
3312 print_binder_work(m, " ",
3313 " pending async transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003314}
3315
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003316static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003317{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003318 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3319 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3320 ref->node->debug_id, ref->strong, ref->weak, ref->death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003321}
3322
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003323static void print_binder_proc(struct seq_file *m,
3324 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003325{
3326 struct binder_work *w;
3327 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003328 size_t start_pos = m->count;
3329 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003330
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003331 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08003332 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003333 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003334
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003335 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3336 print_binder_thread(m, rb_entry(n, struct binder_thread,
3337 rb_node), print_all);
3338 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003339 struct binder_node *node = rb_entry(n, struct binder_node,
3340 rb_node);
3341 if (print_all || node->has_async_transaction)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003342 print_binder_node(m, node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003343 }
3344 if (print_all) {
3345 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003346 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003347 n = rb_next(n))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003348 print_binder_ref(m, rb_entry(n, struct binder_ref,
3349 rb_node_desc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003350 }
Todd Kjos19c98722017-06-29 12:01:40 -07003351 binder_alloc_print_allocated(m, &proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003352 list_for_each_entry(w, &proc->todo, entry)
3353 print_binder_work(m, " ", " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003354 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003355 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003356 break;
3357 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003358 if (!print_all && m->count == header_pos)
3359 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003360}
3361
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10003362static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003363 "BR_ERROR",
3364 "BR_OK",
3365 "BR_TRANSACTION",
3366 "BR_REPLY",
3367 "BR_ACQUIRE_RESULT",
3368 "BR_DEAD_REPLY",
3369 "BR_TRANSACTION_COMPLETE",
3370 "BR_INCREFS",
3371 "BR_ACQUIRE",
3372 "BR_RELEASE",
3373 "BR_DECREFS",
3374 "BR_ATTEMPT_ACQUIRE",
3375 "BR_NOOP",
3376 "BR_SPAWN_LOOPER",
3377 "BR_FINISHED",
3378 "BR_DEAD_BINDER",
3379 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3380 "BR_FAILED_REPLY"
3381};
3382
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10003383static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003384 "BC_TRANSACTION",
3385 "BC_REPLY",
3386 "BC_ACQUIRE_RESULT",
3387 "BC_FREE_BUFFER",
3388 "BC_INCREFS",
3389 "BC_ACQUIRE",
3390 "BC_RELEASE",
3391 "BC_DECREFS",
3392 "BC_INCREFS_DONE",
3393 "BC_ACQUIRE_DONE",
3394 "BC_ATTEMPT_ACQUIRE",
3395 "BC_REGISTER_LOOPER",
3396 "BC_ENTER_LOOPER",
3397 "BC_EXIT_LOOPER",
3398 "BC_REQUEST_DEATH_NOTIFICATION",
3399 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen79802402017-02-03 14:40:51 -08003400 "BC_DEAD_BINDER_DONE",
3401 "BC_TRANSACTION_SG",
3402 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003403};
3404
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10003405static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003406 "proc",
3407 "thread",
3408 "node",
3409 "ref",
3410 "death",
3411 "transaction",
3412 "transaction_complete"
3413};
3414
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003415static void print_binder_stats(struct seq_file *m, const char *prefix,
3416 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003417{
3418 int i;
3419
3420 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003421 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003422 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3423 if (stats->bc[i])
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003424 seq_printf(m, "%s%s: %d\n", prefix,
3425 binder_command_strings[i], stats->bc[i]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003426 }
3427
3428 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003429 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003430 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3431 if (stats->br[i])
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003432 seq_printf(m, "%s%s: %d\n", prefix,
3433 binder_return_strings[i], stats->br[i]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003434 }
3435
3436 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003437 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003438 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003439 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003440 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3441 if (stats->obj_created[i] || stats->obj_deleted[i])
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003442 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3443 binder_objstat_strings[i],
3444 stats->obj_created[i] - stats->obj_deleted[i],
3445 stats->obj_created[i]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003446 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003447}
3448
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003449static void print_binder_proc_stats(struct seq_file *m,
3450 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003451{
3452 struct binder_work *w;
3453 struct rb_node *n;
3454 int count, strong, weak;
3455
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003456 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08003457 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003458 count = 0;
3459 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3460 count++;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003461 seq_printf(m, " threads: %d\n", count);
3462 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003463 " ready threads %d\n"
3464 " free async space %zd\n", proc->requested_threads,
3465 proc->requested_threads_started, proc->max_threads,
Todd Kjos19c98722017-06-29 12:01:40 -07003466 proc->ready_threads,
3467 binder_alloc_get_free_async_space(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003468 count = 0;
3469 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3470 count++;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003471 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003472 count = 0;
3473 strong = 0;
3474 weak = 0;
3475 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3476 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3477 rb_node_desc);
3478 count++;
3479 strong += ref->strong;
3480 weak += ref->weak;
3481 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003482 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003483
Todd Kjos19c98722017-06-29 12:01:40 -07003484 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003485 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003486
3487 count = 0;
3488 list_for_each_entry(w, &proc->todo, entry) {
3489 switch (w->type) {
3490 case BINDER_WORK_TRANSACTION:
3491 count++;
3492 break;
3493 default:
3494 break;
3495 }
3496 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003497 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003498
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003499 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003500}
3501
3502
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003503static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003504{
3505 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003506 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003507
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003508 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003509
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003510 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003511
3512 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003513 seq_puts(m, "dead nodes:\n");
Sasha Levinb67bfe02013-02-27 17:06:00 -08003514 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003515 print_binder_node(m, node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003516
Sasha Levinb67bfe02013-02-27 17:06:00 -08003517 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003518 print_binder_proc(m, proc, 1);
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003519 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003520 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003521}
3522
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003523static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003524{
3525 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003526
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003527 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003528
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003529 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003530
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003531 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003532
Sasha Levinb67bfe02013-02-27 17:06:00 -08003533 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003534 print_binder_proc_stats(m, proc);
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003535 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003536 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003537}
3538
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003539static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003540{
3541 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003542
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003543 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003544
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003545 seq_puts(m, "binder transactions:\n");
Sasha Levinb67bfe02013-02-27 17:06:00 -08003546 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003547 print_binder_proc(m, proc, 0);
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003548 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003549 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003550}
3551
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003552static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003553{
Riley Andrews83050a42016-02-09 21:05:33 -08003554 struct binder_proc *itr;
Martijn Coenen14db3182017-02-03 14:40:47 -08003555 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003556
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003557 binder_lock(__func__);
Riley Andrews83050a42016-02-09 21:05:33 -08003558
3559 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen14db3182017-02-03 14:40:47 -08003560 if (itr->pid == pid) {
3561 seq_puts(m, "binder proc state:\n");
3562 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08003563 }
3564 }
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003565 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003566 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003567}
3568
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003569static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003570 struct binder_transaction_log_entry *e)
3571{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003572 seq_printf(m,
Martijn Coenen14db3182017-02-03 14:40:47 -08003573 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003574 e->debug_id, (e->call_type == 2) ? "reply" :
3575 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen14db3182017-02-03 14:40:47 -08003576 e->from_thread, e->to_proc, e->to_thread, e->context_name,
3577 e->to_node, e->target_handle, e->data_size, e->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003578}
3579
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003580static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003581{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003582 struct binder_transaction_log *log = m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003583 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003584
3585 if (log->full) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003586 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3587 print_binder_transaction_log_entry(m, &log->entry[i]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003588 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003589 for (i = 0; i < log->next; i++)
3590 print_binder_transaction_log_entry(m, &log->entry[i]);
3591 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003592}
3593
3594static const struct file_operations binder_fops = {
3595 .owner = THIS_MODULE,
3596 .poll = binder_poll,
3597 .unlocked_ioctl = binder_ioctl,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003598 .compat_ioctl = binder_ioctl,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003599 .mmap = binder_mmap,
3600 .open = binder_open,
3601 .flush = binder_flush,
3602 .release = binder_release,
3603};
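/*
 * The same handler backs .compat_ioctl: unless BINDER_IPC_32BIT is set,
 * the binder UAPI uses fixed-width 64-bit fields (binder_uintptr_t,
 * binder_size_t), so 32-bit and 64-bit userspace pass identically laid
 * out structures and no compat translation is needed.
 */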
3604
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003605BINDER_DEBUG_ENTRY(state);
3606BINDER_DEBUG_ENTRY(stats);
3607BINDER_DEBUG_ENTRY(transactions);
3608BINDER_DEBUG_ENTRY(transaction_log);
3609
Martijn Coenenac4812c2017-02-03 14:40:48 -08003610static int __init init_binder_device(const char *name)
3611{
3612 int ret;
3613 struct binder_device *binder_device;
3614
3615 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
3616 if (!binder_device)
3617 return -ENOMEM;
3618
3619 binder_device->miscdev.fops = &binder_fops;
3620 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
3621 binder_device->miscdev.name = name;
3622
3623 binder_device->context.binder_context_mgr_uid = INVALID_UID;
3624 binder_device->context.name = name;
3625
3626 ret = misc_register(&binder_device->miscdev);
3627 if (ret < 0) {
3628 kfree(binder_device);
3629 return ret;
3630 }
3631
3632 hlist_add_head(&binder_device->hlist, &binder_devices);
3633
3634 return ret;
3635}
3636
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003637static int __init binder_init(void)
3638{
3639 int ret;
Martijn Coenenac4812c2017-02-03 14:40:48 -08003640 char *device_name, *device_names;
3641 struct binder_device *device;
3642 struct hlist_node *tmp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003643
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07003644 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3645 if (binder_debugfs_dir_entry_root)
3646 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3647 binder_debugfs_dir_entry_root);
Martijn Coenenac4812c2017-02-03 14:40:48 -08003648
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07003649 if (binder_debugfs_dir_entry_root) {
3650 debugfs_create_file("state",
3651 S_IRUGO,
3652 binder_debugfs_dir_entry_root,
3653 NULL,
3654 &binder_state_fops);
3655 debugfs_create_file("stats",
3656 S_IRUGO,
3657 binder_debugfs_dir_entry_root,
3658 NULL,
3659 &binder_stats_fops);
3660 debugfs_create_file("transactions",
3661 S_IRUGO,
3662 binder_debugfs_dir_entry_root,
3663 NULL,
3664 &binder_transactions_fops);
3665 debugfs_create_file("transaction_log",
3666 S_IRUGO,
3667 binder_debugfs_dir_entry_root,
3668 &binder_transaction_log,
3669 &binder_transaction_log_fops);
3670 debugfs_create_file("failed_transaction_log",
3671 S_IRUGO,
3672 binder_debugfs_dir_entry_root,
3673 &binder_transaction_log_failed,
3674 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003675 }
Martijn Coenenac4812c2017-02-03 14:40:48 -08003676
3677 /*
3678 * Copy the module parameter string, because we don't want to
3679 * tokenize it in-place.
3680 */
3681 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
3682 if (!device_names) {
3683 ret = -ENOMEM;
3684 goto err_alloc_device_names_failed;
3685 }
3686 strcpy(device_names, binder_devices_param);
3687
3688 while ((device_name = strsep(&device_names, ","))) {
3689 ret = init_binder_device(device_name);
3690 if (ret)
3691 goto err_init_binder_device_failed;
3692 }
3693
3694 return ret;
3695
3696err_init_binder_device_failed:
3697 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
3698 misc_deregister(&device->miscdev);
3699 hlist_del(&device->hlist);
3700 kfree(device);
3701 }
3702err_alloc_device_names_failed:
3703 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
3704
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003705 return ret;
3706}
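/*
 * The comma-separated device list parsed above is assumed to come from
 * the driver's "devices" module parameter, backed by
 * CONFIG_ANDROID_BINDER_DEVICES (typically "binder,hwbinder,vndbinder"
 * on Android); each name gets its own misc device and binder_context.
 */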
3707
3708device_initcall(binder_init);
3709
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003710#define CREATE_TRACE_POINTS
3711#include "binder_trace.h"
3712
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003713MODULE_LICENSE("GPL v2");