/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo), as well as
 *    thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
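
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller honoring the lock order documented above, using the helpers
 * defined later in this file:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);		// release in reverse order
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */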

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
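
/*
 * Illustrative note (not part of the original source):
 * BINDER_DEBUG_ENTRY(proc) above expands to roughly
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = { ... };
 *
 * i.e. it wires the binder_proc_show() seq_file handler to a debugfs
 * file using the single_open() helper.
 */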

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
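
/*
 * Illustrative note (not part of the original source): because
 * debug_mask is exported read/write via module_param_named() above,
 * the set of enabled debug messages can be changed at runtime, e.g.
 *
 *	echo 0x207 > /sys/module/binder/parameters/debug_mask
 *
 * enables BINDER_DEBUG_TRANSACTION (1U << 9) on top of the default
 * mask of 0x7 (user errors plus failed/dead transactions).
 */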

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
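
/*
 * Illustrative note (not part of the original source): a reader of the
 * log is expected to pair the smp_wmb() above with a read barrier, e.g.
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// copy out *e, then re-read e->debug_id_done to detect a
 *	// concurrent overwrite of this ring-buffer slot
 *
 * so the cleared debug_id_done is observed before any of the fields
 * zeroed by the memset() above.
 */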

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:            binder_ref_data containing id, handle, and current
 *                   refcounts
 * @rb_node_desc:    node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node:    node for lookup by @node in proc's rb_tree
 * @node_entry:      list entry for node->refs list in target node
 *                   (protected by @node->lock)
 * @proc:            binder_proc containing ref
 * @node:            binder_node of target node. When cleaning up a
 *                   ref for deletion in binder_cleanup_ref, a non-NULL
 *                   @node indicates the node must be freed
 * @death:           pointer to death notification (ref_death) if requested
 *                   (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @ready_threads:        number of threads waiting for proc work
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc whose outer lock is released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc whose inner lock is released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node whose lock is released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node whose locks are released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
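
/*
 * Illustrative sketch (not part of the original source): the helpers
 * above pair with the _nilocked suffix convention from the locking
 * overview. A hypothetical caller looks like
 *
 *	binder_node_inner_lock(node);
 *	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
 *	binder_node_inner_unlock(node);
 *
 * which is exactly the pattern binder_inc_node() follows later in
 * this file.
 */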

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
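
/*
 * Illustrative sketch (not part of the original source): draining a
 * todo list with the helpers above. Hypothetical caller:
 *
 *	struct binder_work *w;
 *
 *	while ((w = binder_dequeue_work_head(proc, &proc->todo))) {
 *		switch (w->type) {
 *		case BINDER_WORK_TRANSACTION:
 *			// handle the transaction work item
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 * Each iteration acquires and releases proc->inner_lock, so the list
 * may be modified concurrently between iterations.
 */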

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	BUG_ON(!spin_is_locked(&proc->inner_lock));

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	BUG_ON(!spin_is_locked(&proc->inner_lock));
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node_nilocked() to check if all refcounts
	 * are 0 and cleanup is needed. Calling with strong=0 and
	 * internal=1 causes no actual reference to be released.
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
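
/*
 * Illustrative sketch (not part of the original source): the temporary
 * reference pattern used with binder_get_node():
 *
 *	struct binder_node *node;
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp ref
 *	if (node) {
 *		// ... use node without fear of it being freed ...
 *		binder_put_node(node);		// drops the tmp ref
 *	}
 *
 * The tmp ref keeps the node alive even if all strong and weak
 * user references are dropped concurrently.
 */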

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
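
/*
 * Usage sketch (illustrative, not part of the original file): the proc
 * outer lock is a spinlock, so a new ref cannot be allocated with
 * GFP_KERNEL while it is held. Callers therefore allocate outside the
 * lock, retry the lookup, and free the spare copy if another thread
 * won the race, as binder_inc_ref_for_node() below does:
 *
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *	if (!ref) {
 *		binder_proc_unlock(proc);
 *		new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *		binder_proc_lock(proc);
 *		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	}
 *	binder_proc_unlock(proc);
 *	if (new_ref && ref != new_ref)
 *		kfree(new_ref);	// lost the race; drop the spare
 */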

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:		ref to be incremented
 * @strong:		if true, strong increment, else weak
 * @target_list:	list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
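
/*
 * Note (added commentary, not in the original source): only the
 * 0 -> 1 transition of ref->data.strong or ref->data.weak reaches
 * binder_inc_node(); later increments only bump the per-proc counter.
 * A proc therefore holds at most one strong and one weak reference on
 * the node no matter how many times userspace increments the handle.
 */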

/**
 * binder_dec_ref_olocked() - decrement the given ref
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
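
/*
 * Usage sketch (illustrative, not part of the original file): when
 * binder_dec_ref_olocked() returns true the ref has been unhooked
 * from both rb-trees, but freeing must wait until the outer lock is
 * dropped, as binder_update_ref_for_handle() below does:
 *
 *	binder_proc_lock(proc);
 *	delete_ref = binder_dec_ref_olocked(ref, strong);
 *	binder_proc_unlock(proc);
 *	if (delete_ref)
 *		binder_free_ref(ref);
 */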

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
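
/*
 * Usage sketch (illustrative, not part of the original file): every
 * successful binder_get_node_from_ref() must be paired with a
 * binder_put_node() once the caller is done with the node, e.g.:
 *
 *	node = binder_get_node_from_ref(proc, fp->handle, true, &rdata);
 *	if (!node)
 *		return -EINVAL;
 *	...use node...
 *	binder_put_node(node);
 *
 * binder_translate_handle() below follows exactly this shape.
 */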

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
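
/*
 * Note (added commentary, not in the original source): besides
 * unlinking t from the target thread's transaction stack, the
 * t->from = NULL store above detaches the transaction from its
 * sender, so a later binder_get_txn_from() on this transaction
 * returns NULL instead of a stale thread pointer.
 */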

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
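
/*
 * Usage sketch (illustrative, not part of the original file): the
 * tmp_ref taken by binder_get_txn_from() must always be dropped with
 * binder_thread_dec_tmpref(), as binder_send_failed_reply() does:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...queue the reply...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */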

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work_ilocked(
					&target_thread->reply_error.work,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
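
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * walk the offset array and treat a zero return as a malformed
 * object, the way binder_transaction_buffer_release() below does:
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(buffer, *offp);
 *
 *		if (object_size == 0)
 *			continue;	// bad object, skip it
 *		hdr = (struct binder_object_header *)(buffer->data + *offp);
 *		...dispatch on hdr->type...
 *	}
 */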

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* No object has been verified yet; nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
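
/*
 * Worked trace (added commentary, not in the original source) of the
 * "allowed" example above, validating E (parent = A, offset = 32)
 * with last_obj = D: the loop climbs D -> C, setting last_min_offset
 * to D.parent_offset + sizeof(uintptr_t) = 0 + 8, then C -> A,
 * setting it to C.parent_offset + sizeof(uintptr_t) = 16 + 8 = 24
 * (assuming 64-bit pointers). Since the requested fixup offset 32 in
 * A is >= 24, the fixup is accepted.
 */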

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
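
/*
 * Usage sketch (illustrative, not part of the original file):
 * binder_translate_fd() returns the fd number installed in the
 * target process on success and a negative errno on failure, so
 * callers such as binder_translate_fd_array() below consume it as:
 *
 *	target_fd = binder_translate_fd(fd_array[fdi], t, thread,
 *					in_reply_to);
 *	if (target_fd < 0)
 *		goto err_translate_fd_failed;
 *	fd_array[fdi] = target_fd;
 */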

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
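
/*
 * Worked example (added commentary, not in the original source): for
 * a buffer object bp with BINDER_BUFFER_FLAG_HAS_PARENT whose parent
 * has parent_offset = 16, binder_fixup_parent() ends up storing
 * bp->buffer (the child's address as mapped in the target) into the
 * pointer slot 16 bytes into the parent's payload, i.e. conceptually:
 *
 *	*(binder_uintptr_t *)(parent_payload + 16) = bp->buffer;
 *
 * where parent_payload is the parent's buffer translated back to a
 * kernel address via binder_alloc_get_user_buffer_offset().
 */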
2403
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002404static void binder_transaction(struct binder_proc *proc,
2405 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002406 struct binder_transaction_data *tr, int reply,
2407 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002408{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002409 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002410 struct binder_transaction *t;
2411 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002412 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002413 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002414 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002415 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002416 struct binder_thread *target_thread = NULL;
2417 struct binder_node *target_node = NULL;
2418 struct list_head *target_list;
2419 wait_queue_head_t *target_wait;
2420 struct binder_transaction *in_reply_to = NULL;
2421 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002422 uint32_t return_error = 0;
2423 uint32_t return_error_param = 0;
2424 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002425 struct binder_buffer_object *last_fixup_obj = NULL;
2426 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002427 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002428 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002429
2430 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002431 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002432 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2433 e->from_proc = proc->pid;
2434 e->from_thread = thread->pid;
2435 e->target_handle = tr->target.handle;
2436 e->data_size = tr->data_size;
2437 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002438 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002439
2440 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002441 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002442 in_reply_to = thread->transaction_stack;
2443 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002444 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302445 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002446 proc->pid, thread->pid);
2447 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002448 return_error_param = -EPROTO;
2449 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002450 goto err_empty_call_stack;
2451 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002452 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002453 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302454 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002455 proc->pid, thread->pid, in_reply_to->debug_id,
2456 in_reply_to->to_proc ?
2457 in_reply_to->to_proc->pid : 0,
2458 in_reply_to->to_thread ?
2459 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002460 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002461 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002462 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002463 return_error_param = -EPROTO;
2464 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002465 in_reply_to = NULL;
2466 goto err_bad_call_stack;
2467 }
2468 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002469 binder_inner_proc_unlock(proc);
2470 binder_set_nice(in_reply_to->saved_priority);
2471 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002472 if (target_thread == NULL) {
2473 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002474 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002475 goto err_dead_binder;
2476 }
2477 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302478 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002479 proc->pid, thread->pid,
2480 target_thread->transaction_stack ?
2481 target_thread->transaction_stack->debug_id : 0,
2482 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002483 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002484 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002485 return_error_param = -EPROTO;
2486 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002487 in_reply_to = NULL;
2488 target_thread = NULL;
2489 goto err_dead_binder;
2490 }
2491 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002492 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002493 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002494 } else {
2495 if (tr->target.handle) {
2496 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002497
Todd Kjosc37162d2017-05-26 11:56:29 -07002498 /*
2499 * There must already be a strong ref
2500 * on this node. If so, do a strong
2501 * increment on the node to ensure it
2502 * stays alive until the transaction is
2503 * done.
2504 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002505 binder_proc_lock(proc);
2506 ref = binder_get_ref_olocked(proc, tr->target.handle,
2507 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002508 if (ref) {
2509 binder_inc_node(ref->node, 1, 0, NULL);
2510 target_node = ref->node;
2511 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002512 binder_proc_unlock(proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002513 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302514 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002515 proc->pid, thread->pid);
2516 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002517 return_error_param = -EINVAL;
2518 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002519 goto err_invalid_target_handle;
2520 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002521 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002522 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002523 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002524 if (target_node == NULL) {
2525 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002526 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjose598d172017-03-22 17:19:52 -07002527 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002528 goto err_no_context_mgr_node;
2529 }
Todd Kjosc37162d2017-05-26 11:56:29 -07002530 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002531 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002532 }
2533 e->to_node = target_node->debug_id;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002534 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002535 target_proc = target_node->proc;
2536 if (target_proc == NULL) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002537 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002538 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002539 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002540 goto err_dead_binder;
2541 }
Todd Kjosb4827902017-05-25 15:52:17 -07002542 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002543 target_proc->tmp_ref++;
Todd Kjosb4827902017-05-25 15:52:17 -07002544 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002545 binder_node_unlock(target_node);
Stephen Smalley79af7302015-01-21 10:54:10 -05002546 if (security_binder_transaction(proc->tsk,
2547 target_proc->tsk) < 0) {
2548 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002549 return_error_param = -EPERM;
2550 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05002551 goto err_invalid_target_handle;
2552 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002553 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002554 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2555 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002556
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002557 tmp = thread->transaction_stack;
2558 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002559 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302560 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002561 proc->pid, thread->pid, tmp->debug_id,
2562 tmp->to_proc ? tmp->to_proc->pid : 0,
2563 tmp->to_thread ?
2564 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002565 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002566 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002567 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002568 return_error_param = -EPROTO;
2569 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002570 goto err_bad_call_stack;
2571 }
2572 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002573 struct binder_thread *from;
2574
2575 spin_lock(&tmp->lock);
2576 from = tmp->from;
2577 if (from && from->proc == target_proc) {
2578 atomic_inc(&from->tmp_ref);
2579 target_thread = from;
2580 spin_unlock(&tmp->lock);
2581 break;
2582 }
2583 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002584 tmp = tmp->from_parent;
2585 }
2586 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002587 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002588 }
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

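	/*
	 * Only a synchronous request records the sending thread;
	 * replies and oneway transactions carry no return path.
	 */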
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

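	/*
	 * The payload buffer is carved out of the target's mmap'ed
	 * region; async (oneway) transactions draw from the smaller
	 * async free pool so they cannot exhaust the whole arena.
	 */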
	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
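	/*
	 * Translate each flattened object named by the offsets array.
	 * off_min advances past every validated object, so offsets
	 * that overlap or run backwards are rejected.
	 */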
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
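		/*
		 * A BINDER_TYPE_FDA object describes an fd array embedded
		 * in a parent BINDER_TYPE_PTR buffer; both the parent and
		 * the fixup ordering are validated before any fd is
		 * installed in the target process.
		 */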
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
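		/*
		 * BINDER_TYPE_PTR objects carry the scatter-gather
		 * payload: each user buffer is copied into the extra
		 * sg area and its pointer is rewritten to the target's
		 * view of the shared mapping.
		 */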
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	binder_enqueue_work(proc, tcomplete, &thread->todo);
	t->work.type = BINDER_WORK_TRANSACTION;

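	/*
	 * Deliver the transaction: pop the target thread's stack for a
	 * reply, push the sender's stack for a synchronous call, or
	 * queue behind node->async_todo for a oneway call.  Every path
	 * re-checks that the target is still alive under the target's
	 * inner lock before enqueueing.
	 */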
	if (reply) {
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_work_ilocked(&t->work, target_list);
		binder_inner_proc_unlock(target_proc);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		binder_inner_proc_lock(target_proc);
		if (target_proc->is_dead ||
		    (target_thread && target_thread->is_dead)) {
			binder_inner_proc_unlock(target_proc);
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
		binder_enqueue_work_ilocked(&t->work, target_list);
		binder_inner_proc_unlock(target_proc);
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_node_lock(target_node);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
		/*
		 * Test/set of has_async_transaction
		 * must be atomic with enqueue on
		 * async_todo
		 */
		binder_inner_proc_lock(target_proc);
		if (target_proc->is_dead ||
		    (target_thread && target_thread->is_dead)) {
			binder_inner_proc_unlock(target_proc);
			binder_node_unlock(target_node);
			goto err_dead_proc_or_thread;
		}
		binder_enqueue_work_ilocked(&t->work, target_list);
		binder_inner_proc_unlock(target_proc);
		binder_node_unlock(target_node);
	}
	if (target_wait) {
		if (reply || !(tr->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

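/*
 * Error unwinding: the labels run in reverse order of construction,
 * so each one releases only what had been set up before the failing
 * step.
 */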
err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node(target_node, 1, 0);

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
	}
}

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
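			/*
			 * Handle 0 names the context manager: an
			 * increment on it resolves through the global
			 * context_mgr_node instead of this proc's
			 * ref tree.
			 */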
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
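			/*
			 * Freeing an async buffer unblocks the node's
			 * async_todo queue: the next pending oneway
			 * transaction, if any, is handed to this thread
			 * for delivery.
			 */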
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w)
					buf_node->has_async_transaction = 0;
				else
					binder_enqueue_work_ilocked(
							w, &thread->todo);
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

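		/*
		 * BC_TRANSACTION_SG/BC_REPLY_SG append a buffers_size
		 * for scatter-gather payloads; the plain variants take
		 * the same path with a zero sg budget.
		 */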
		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_work(
						thread->proc,
						&thread->return_error.work,
						&thread->todo);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work(
							proc,
							&ref->death->work,
							&thread->todo);
					else {
						binder_enqueue_work(
							proc,
							&ref->death->work,
							&proc->todo);
						wake_up_interruptible(
								&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work_ilocked(
								&death->work,
								&thread->todo);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						wake_up_interruptible(
								&proc->wait);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
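		/*
		 * BC_DEAD_BINDER_DONE acknowledges a BR_DEAD_BINDER:
		 * the matching work item is found by cookie on
		 * proc->delivered_death and either retired or requeued
		 * as a pending clear-notification.
		 */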
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_work_ilocked(
						&death->work, &thread->todo);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !binder_worklist_empty(proc, &proc->todo) ||
		thread->looper_need_return;
}

static int binder_has_thread_work(struct binder_thread *thread)
{
	return !binder_worklist_empty(thread->proc, &thread->todo) ||
		thread->looper_need_return;
}

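/*
 * Write one BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS command into
 * the read buffer.  Kept out of line so binder_thread_read() can emit
 * the whole BR_* sequence a node needs from a single snapshot taken
 * under the inner lock.
 */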
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

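/*
 * A thread with an empty todo list and no transaction stack services
 * the proc-wide queue; ready_threads counts such sleepers so the
 * driver knows when to ask userspace for another looper.
 */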
retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = thread->transaction_stack == NULL &&
		binder_worklist_empty_ilocked(&thread->todo);
	if (wait_for_proc_work)
		proc->ready_threads++;
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	binder_inner_proc_lock(proc);
	if (wait_for_proc_work)
		proc->ready_threads--;
	binder_inner_proc_unlock(proc);
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			/*
			 * Capture the command before clearing it so
			 * binder_stat_br() counts the code we just
			 * copied to userspace, not a stale value.
			 */
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
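		/*
		 * BINDER_WORK_NODE: snapshot the node's desired strong
		 * and weak state under the inner lock, then emit the
		 * matching BR_* commands after dropping it; the snapshot
		 * keeps userspace consistent even if the node changes
		 * while the lock is released.
		 */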
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
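		/*
		 * Death notifications: BR_DEAD_BINDER tells user space a
		 * remote binder died, and the work item is parked on
		 * proc->delivered_death until user space acks with
		 * BC_DEAD_BINDER_DONE; cleared notifications are confirmed
		 * with BR_CLEAR_DEATH_NOTIFICATION_DONE and freed here.
		 */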
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* user space fails to spawn a new thread if this is left out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

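/*
 * Illustrative sketch, not part of the driver: one way a user-space
 * thread pool could react to the BR_SPAWN_LOOPER hint emitted above.
 * example_spawn_looper_thread() is hypothetical; only BR_SPAWN_LOOPER
 * and BC_REGISTER_LOOPER come from the binder UAPI, and real return
 * parsing must also skip the payload that follows some returns.
 */
#if 0
static void example_handle_returns(int fd, const uint32_t *buf, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (buf[i] == BR_SPAWN_LOOPER)
			/* new thread writes BC_REGISTER_LOOPER, then
			 * services work via BINDER_WRITE_READ */
			example_spawn_looper_thread(fd);
	}
}
#endif
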
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;

	return thread;
}

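/*
 * Look up the calling thread's binder_thread, creating it on first
 * use. Allocation is split in two phases so kzalloc() never runs
 * under the inner lock: probe without a candidate, allocate unlocked,
 * then retry the lookup with the candidate; if another thread raced
 * in and inserted one first, the unused candidate is freed.
 */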
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	binder_inner_proc_unlock(thread->proc);

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

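/*
 * poll() support: a thread with no transaction stack and an empty
 * todo list waits on the per-process queue, otherwise on its own
 * per-thread queue; POLLIN signals that there is work to read.
 */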
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	thread = binder_get_thread(proc);

	binder_inner_proc_lock(thread->proc);
	wait_for_proc_work = thread->transaction_stack == NULL &&
		binder_worklist_empty_ilocked(&thread->todo);
	binder_inner_proc_unlock(thread->proc);

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!binder_worklist_empty(proc, &proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

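/*
 * Illustrative sketch, not part of the driver: a minimal user-space
 * caller of BINDER_WRITE_READ, assuming <sys/ioctl.h> and the UAPI
 * header <linux/android/binder.h>; fd comes from open("/dev/binder",
 * O_RDWR | O_CLOEXEC) and error handling is omitted.
 */
#if 0
static int example_enter_looper(int fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;
	struct binder_write_read bwr = {
		.write_size = sizeof(cmd),
		.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd,
		/* read_size == 0: this call only submits a command */
	};

	/* the driver reports progress via write_consumed/read_consumed */
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}
#endif
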
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

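/*
 * Illustrative sketch, not part of the driver: the context manager
 * (servicemanager on Android) claims the role handled above with a
 * single ioctl; the driver allows only one manager per context.
 */
#if 0
	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
		return -1; /* already set, bad uid, or denied by LSM */
#endif
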
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

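/*
 * Illustrative sketch, not part of the driver: user space typically
 * validates the protocol version before issuing other ioctls,
 * matching the BINDER_VERSION case above.
 */
#if 0
	struct binder_version vers;

	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		return -1; /* kernel/user protocol mismatch */
#endif
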
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

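/*
 * Illustrative sketch, not part of the driver: user space maps the
 * transaction buffer read-only, since writable mappings are rejected
 * above; the 1 MB length is only an example, anything up to the 4 MB
 * cap enforced above works.
 */
#if 0
	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
#endif
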
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * print all contexts for a given PID anyway, so this is
		 * not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

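/*
 * Called during proc teardown with a temporary ref held on the node.
 * A node with no other references is freed immediately; otherwise it
 * moves to binder_dead_nodes and a BR_DEAD_BINDER is queued for every
 * ref that requested a death notification.
 */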
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		wake_up_interruptible(&ref->proc->wait);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

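/*
 * Final teardown of a binder_proc, run from the deferred workqueue
 * after the last file reference is gone: unhook it from binder_procs
 * and the context-manager slot, release all threads, nodes, and refs,
 * drain the remaining work lists, and drop the temporary proc
 * reference taken below.
 */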
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;
	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	WARN_ON(!spin_is_locked(&proc->inner_lock));
	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	WARN_ON(!spin_is_locked(&node->lock));
	if (node->proc)
		WARN_ON(!spin_is_locked(&node->proc->inner_lock));

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004877static void print_binder_proc(struct seq_file *m,
4878 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004879{
4880 struct binder_work *w;
4881 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004882 size_t start_pos = m->count;
4883 size_t header_pos;
Todd Kjos425d23f2017-06-12 12:07:26 -07004884 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004885
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004886 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004887 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004888 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004889
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004890 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004891 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004892 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004893 rb_node), print_all);
Todd Kjos425d23f2017-06-12 12:07:26 -07004894
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004895 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004896 struct binder_node *node = rb_entry(n, struct binder_node,
4897 rb_node);
Todd Kjos425d23f2017-06-12 12:07:26 -07004898 /*
4899 * take a temporary reference on the node so it
4900 * survives and isn't removed from the tree
4901 * while we print it.
4902 */
4903 binder_inc_node_tmpref_ilocked(node);
4904 /* Need to drop inner lock to take node lock */
4905 binder_inner_proc_unlock(proc);
4906 if (last_node)
4907 binder_put_node(last_node);
4908 binder_node_inner_lock(node);
4909 print_binder_node_nilocked(m, node);
4910 binder_node_inner_unlock(node);
4911 last_node = node;
4912 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004913 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004914 binder_inner_proc_unlock(proc);
4915 if (last_node)
4916 binder_put_node(last_node);
4917
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004918 if (print_all) {
Todd Kjos5346bf32016-10-20 16:43:34 -07004919 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004920 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004921 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004922 n = rb_next(n))
Todd Kjos5346bf32016-10-20 16:43:34 -07004923 print_binder_ref_olocked(m, rb_entry(n,
4924 struct binder_ref,
4925 rb_node_desc));
4926 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004927 }
Todd Kjosd325d372016-10-10 10:40:53 -07004928 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004929 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004930 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos6d241a42017-04-21 14:32:11 -07004931 print_binder_work_ilocked(m, proc, " ",
4932 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004933 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004934 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004935 break;
4936 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004937 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004938 if (!print_all && m->count == header_pos)
4939 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004940}
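/*
 * Illustrative sketch, not part of binder.c: the hand-over-hand
 * pattern print_binder_proc() uses to walk proc->nodes, generalized
 * into a walker.  As the "Need to drop inner lock to take node lock"
 * comment in the loop above notes, the inner lock must be released
 * before the node lock can be taken; the temporary reference pins the
 * node across that unlocked window, and the previous node's reference
 * is dropped only while no spinlock is held.  visit() is a
 * hypothetical callback, entered with the node and inner locks held
 * (the nilocked convention).
 */
static void example_walk_nodes(struct binder_proc *proc,
			       void (*visit)(struct binder_node *node))
{
	struct binder_node *last_node = NULL;
	struct rb_node *n;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);

		binder_inc_node_tmpref_ilocked(node);	/* pin the node */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		visit(node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);
}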
4941
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004942static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004943 "BR_ERROR",
4944 "BR_OK",
4945 "BR_TRANSACTION",
4946 "BR_REPLY",
4947 "BR_ACQUIRE_RESULT",
4948 "BR_DEAD_REPLY",
4949 "BR_TRANSACTION_COMPLETE",
4950 "BR_INCREFS",
4951 "BR_ACQUIRE",
4952 "BR_RELEASE",
4953 "BR_DECREFS",
4954 "BR_ATTEMPT_ACQUIRE",
4955 "BR_NOOP",
4956 "BR_SPAWN_LOOPER",
4957 "BR_FINISHED",
4958 "BR_DEAD_BINDER",
4959 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4960 "BR_FAILED_REPLY"
4961};
4962
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004963static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004964 "BC_TRANSACTION",
4965 "BC_REPLY",
4966 "BC_ACQUIRE_RESULT",
4967 "BC_FREE_BUFFER",
4968 "BC_INCREFS",
4969 "BC_ACQUIRE",
4970 "BC_RELEASE",
4971 "BC_DECREFS",
4972 "BC_INCREFS_DONE",
4973 "BC_ACQUIRE_DONE",
4974 "BC_ATTEMPT_ACQUIRE",
4975 "BC_REGISTER_LOOPER",
4976 "BC_ENTER_LOOPER",
4977 "BC_EXIT_LOOPER",
4978 "BC_REQUEST_DEATH_NOTIFICATION",
4979 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02004980 "BC_DEAD_BINDER_DONE",
4981 "BC_TRANSACTION_SG",
4982 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004983};
4984
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004985static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004986 "proc",
4987 "thread",
4988 "node",
4989 "ref",
4990 "death",
4991 "transaction",
4992 "transaction_complete"
4993};
4994
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004995static void print_binder_stats(struct seq_file *m, const char *prefix,
4996 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004997{
4998 int i;
4999
5000 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005001 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005002 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005003 int temp = atomic_read(&stats->bc[i]);
5004
5005 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005006 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005007 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005008 }
5009
5010 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005011 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005012 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005013 int temp = atomic_read(&stats->br[i]);
5014
5015 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005016 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005017 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005018 }
5019
5020 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005021 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005022 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005023 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005024 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005025 int created = atomic_read(&stats->obj_created[i]);
5026 int deleted = atomic_read(&stats->obj_deleted[i]);
5027
5028 if (created || deleted)
5029 seq_printf(m, "%s%s: active %d total %d\n",
5030 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005031 binder_objstat_strings[i],
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005032 created - deleted,
5033 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005034 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005035}
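/*
 * Illustrative sketch, not part of binder.c: the BUILD_BUG_ON() calls
 * above turn a string table that has drifted out of sync with its
 * stats array into a compile-time error instead of a runtime
 * out-of-bounds read.  The same idiom with a hypothetical enum and
 * name table:
 */
enum example_stat { EXAMPLE_FOO, EXAMPLE_BAR, EXAMPLE_COUNT };

static const char * const example_stat_strings[] = {
	"foo",
	"bar",
};

static void example_print_stat_names(struct seq_file *m)
{
	int i;

	/* fails to build if a stat is added without a name, or vice versa */
	BUILD_BUG_ON(ARRAY_SIZE(example_stat_strings) != EXAMPLE_COUNT);
	for (i = 0; i < ARRAY_SIZE(example_stat_strings); i++)
		seq_printf(m, "%s\n", example_stat_strings[i]);
}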
5036
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005037static void print_binder_proc_stats(struct seq_file *m,
5038 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005039{
5040 struct binder_work *w;
5041 struct rb_node *n;
5042 int count, strong, weak;
Todd Kjosb4827902017-05-25 15:52:17 -07005043 size_t free_async_space =
5044 binder_alloc_get_free_async_space(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005045
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005046 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005047 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005048 count = 0;
Todd Kjosb4827902017-05-25 15:52:17 -07005049 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005050 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5051 count++;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005052 seq_printf(m, " threads: %d\n", count);
5053 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005054 " ready threads %d\n"
5055 " free async space %zd\n", proc->requested_threads,
5056 proc->requested_threads_started, proc->max_threads,
Todd Kjosd325d372016-10-10 10:40:53 -07005057 proc->ready_threads,
Todd Kjosb4827902017-05-25 15:52:17 -07005058 free_async_space);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005059 count = 0;
5060 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5061 count++;
Todd Kjos425d23f2017-06-12 12:07:26 -07005062 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005063 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005064 count = 0;
5065 strong = 0;
5066 weak = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07005067 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005068 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5069 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5070 rb_node_desc);
5071 count++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07005072 strong += ref->data.strong;
5073 weak += ref->data.weak;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005074 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005075 binder_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005076 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005077
Todd Kjosd325d372016-10-10 10:40:53 -07005078 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005079 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005080
5081 count = 0;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005082 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005083 list_for_each_entry(w, &proc->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005084 if (w->type == BINDER_WORK_TRANSACTION)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005085 count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005086 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005087 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005088 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005089
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005090 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005091}
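/*
 * Reading the "requested threads: %d+%d/%d" line above: the three
 * numbers are, in order, spawn requests currently outstanding
 * (proc->requested_threads), looper threads already started in
 * response to BR_SPAWN_LOOPER (proc->requested_threads_started), and
 * the limit configured with the BINDER_SET_MAX_THREADS ioctl
 * (proc->max_threads).
 */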
5092
5093
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005094static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005095{
5096 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005097 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005098 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005099
Todd Kjos48b33212017-05-24 11:53:13 -07005100 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005101
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005102 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005103
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005104 spin_lock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005105 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005106 seq_puts(m, "dead nodes:\n");
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005107 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5108 /*
5109 * take a temporary reference on the node so it
5110 * survives and isn't removed from the list
5111 * while we print it.
5112 */
5113 node->tmp_refs++;
5114 spin_unlock(&binder_dead_nodes_lock);
5115 if (last_node)
5116 binder_put_node(last_node);
5117 binder_node_lock(node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005118 print_binder_node_nilocked(m, node);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005119 binder_node_unlock(node);
5120 last_node = node;
5121 spin_lock(&binder_dead_nodes_lock);
5122 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005123 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005124 if (last_node)
5125 binder_put_node(last_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005126
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005127 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005128 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005129 print_binder_proc(m, proc, 1);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005130 mutex_unlock(&binder_procs_lock);
Todd Kjos48b33212017-05-24 11:53:13 -07005131 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005132 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005133}
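/*
 * Note on the dead-nodes walk above: the temporary reference is taken
 * by incrementing node->tmp_refs directly rather than through a
 * helper, apparently because the increment happens while
 * binder_dead_nodes_lock is held and a helper would try to take
 * further locks under that spinlock.
 */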
5134
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005135static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005136{
5137 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005138
Todd Kjos48b33212017-05-24 11:53:13 -07005139 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005140
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005141 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005142
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005143 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005144
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005145 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005146 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005147 print_binder_proc_stats(m, proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005148 mutex_unlock(&binder_procs_lock);
Todd Kjos48b33212017-05-24 11:53:13 -07005149 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005150 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005151}
5152
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005153static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005154{
5155 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005156
Todd Kjos48b33212017-05-24 11:53:13 -07005157 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005158
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005159 seq_puts(m, "binder transactions:\n");
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005160 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005161 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005162 print_binder_proc(m, proc, 0);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005163 mutex_unlock(&binder_procs_lock);
Todd Kjos48b33212017-05-24 11:53:13 -07005164 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005165 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005166}
5167
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005168static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005169{
Riley Andrews83050a42016-02-09 21:05:33 -08005170 struct binder_proc *itr;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005171 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005172
Todd Kjos48b33212017-05-24 11:53:13 -07005173 binder_lock(__func__);
Riley Andrews83050a42016-02-09 21:05:33 -08005174
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005175 mutex_lock(&binder_procs_lock);
Riley Andrews83050a42016-02-09 21:05:33 -08005176 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005177 if (itr->pid == pid) {
5178 seq_puts(m, "binder proc state:\n");
5179 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08005180 }
5181 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005182 mutex_unlock(&binder_procs_lock);
5183
Todd Kjos48b33212017-05-24 11:53:13 -07005184 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005185 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005186}
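/*
 * Note on binder_proc_show() above: m->private carries the pid stored
 * when the per-process debugfs file was created (an assumption about
 * code outside this excerpt), and the loop deliberately has no break;
 * with multiple binder devices a single pid can own one binder_proc
 * per device it opened, and every match is printed.
 */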
5187
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005188static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005189 struct binder_transaction_log_entry *e)
5190{
Todd Kjos1cfe6272017-05-24 13:33:28 -07005191 int debug_id = READ_ONCE(e->debug_id_done);
5192 /*
5193	 * read barrier to guarantee the read of debug_id_done
5194	 * happens before we read and print the entry's fields
5195 */
5196 smp_rmb();
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005197 seq_printf(m,
Todd Kjos1cfe6272017-05-24 13:33:28 -07005198 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005199 e->debug_id, (e->call_type == 2) ? "reply" :
5200 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005201 e->from_thread, e->to_proc, e->to_thread, e->context_name,
Todd Kjose598d172017-03-22 17:19:52 -07005202 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5203 e->return_error, e->return_error_param,
5204 e->return_error_line);
Todd Kjos1cfe6272017-05-24 13:33:28 -07005205 /*
5206	 * read barrier to guarantee the re-read of debug_id_done
5207	 * happens after we are done printing the entry's fields
5208 */
5209 smp_rmb();
5210 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5211 "\n" : " (incomplete)\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005212}
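/*
 * Illustrative sketch, not part of binder.c: the smp_rmb() pair in
 * print_binder_transaction_log_entry() assumes a writer that fills in
 * the entry's fields first and publishes debug_id_done last, behind a
 * write barrier, so a reader seeing the same debug_id_done before and
 * after also saw consistent fields.  A hypothetical "entry complete"
 * helper:
 */
static void example_log_entry_done(struct binder_transaction_log_entry *e,
				   int debug_id)
{
	/* make the entry's fields visible before the done marker */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, debug_id);
}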
5213
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005214static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005215{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005216 struct binder_transaction_log *log = m->private;
Todd Kjos1cfe6272017-05-24 13:33:28 -07005217 unsigned int log_cur = atomic_read(&log->cur);
5218 unsigned int count;
5219 unsigned int cur;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005220 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005221
Todd Kjos1cfe6272017-05-24 13:33:28 -07005222 count = log_cur + 1;
5223 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5224 0 : count % ARRAY_SIZE(log->entry);
5225 if (count > ARRAY_SIZE(log->entry) || log->full)
5226 count = ARRAY_SIZE(log->entry);
5227 for (i = 0; i < count; i++) {
5228 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5229
5230 print_binder_transaction_log_entry(m, &log->entry[index]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005231 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005232 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005233}
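/*
 * Worked example for the cursor arithmetic above, assuming a
 * hypothetical 32-entry log: before any wrap (log->full unset), with
 * log_cur == 5, count becomes 6 and cur 0, so entries 0..5 print in
 * order.  After wrapping, with log_cur == 40, count is clamped to 32
 * and cur becomes 41 % 32 == 9, so printing starts at the oldest
 * surviving entry (slot 9) and walks all 32 slots.
 */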
5234
5235static const struct file_operations binder_fops = {
5236 .owner = THIS_MODULE,
5237 .poll = binder_poll,
5238 .unlocked_ioctl = binder_ioctl,
Arve Hjønnevågda498892014-02-21 14:40:26 -08005239 .compat_ioctl = binder_ioctl,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005240 .mmap = binder_mmap,
5241 .open = binder_open,
5242 .flush = binder_flush,
5243 .release = binder_release,
5244};
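/*
 * Illustrative userspace sketch, not part of binder.c: a minimal
 * client exercising .open, .unlocked_ioctl and .mmap from the fops
 * above.  BINDER_VERSION and struct binder_version come from the uapi
 * header; the 1 MiB mapping size is an arbitrary choice for this
 * sketch (libbinder picks its own).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h>

int main(void)
{
	struct binder_version vers;
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0)
		return 1;
	printf("binder protocol version %d\n", vers.protocol_version);

	/* receive buffers live in a read-only mapping of the fd */
	if (mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0) ==
	    MAP_FAILED)
		return 1;
	return 0;
}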
5245
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005246BINDER_DEBUG_ENTRY(state);
5247BINDER_DEBUG_ENTRY(stats);
5248BINDER_DEBUG_ENTRY(transactions);
5249BINDER_DEBUG_ENTRY(transaction_log);
5250
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005251static int __init init_binder_device(const char *name)
5252{
5253 int ret;
5254 struct binder_device *binder_device;
5255
5256 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5257 if (!binder_device)
5258 return -ENOMEM;
5259
5260 binder_device->miscdev.fops = &binder_fops;
5261 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5262 binder_device->miscdev.name = name;
5263
5264 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5265 binder_device->context.name = name;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005266 mutex_init(&binder_device->context.context_mgr_node_lock);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005267
5268 ret = misc_register(&binder_device->miscdev);
5269 if (ret < 0) {
5270 kfree(binder_device);
5271 return ret;
5272 }
5273
5274 hlist_add_head(&binder_device->hlist, &binder_devices);
5275
5276 return ret;
5277}
5278
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005279static int __init binder_init(void)
5280{
5281 int ret;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005282 char *device_name, *device_names;
5283 struct binder_device *device;
5284 struct hlist_node *tmp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005285
Todd Kjos1cfe6272017-05-24 13:33:28 -07005286 atomic_set(&binder_transaction_log.cur, ~0U);
5287 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5288
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005289 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5290 if (binder_debugfs_dir_entry_root)
5291 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5292 binder_debugfs_dir_entry_root);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005293
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005294 if (binder_debugfs_dir_entry_root) {
5295 debugfs_create_file("state",
5296 S_IRUGO,
5297 binder_debugfs_dir_entry_root,
5298 NULL,
5299 &binder_state_fops);
5300 debugfs_create_file("stats",
5301 S_IRUGO,
5302 binder_debugfs_dir_entry_root,
5303 NULL,
5304 &binder_stats_fops);
5305 debugfs_create_file("transactions",
5306 S_IRUGO,
5307 binder_debugfs_dir_entry_root,
5308 NULL,
5309 &binder_transactions_fops);
5310 debugfs_create_file("transaction_log",
5311 S_IRUGO,
5312 binder_debugfs_dir_entry_root,
5313 &binder_transaction_log,
5314 &binder_transaction_log_fops);
5315 debugfs_create_file("failed_transaction_log",
5316 S_IRUGO,
5317 binder_debugfs_dir_entry_root,
5318 &binder_transaction_log_failed,
5319 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005320 }
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005321
5322 /*
5323	 * Copy the module parameter string, because we don't want to
5324 * tokenize it in-place.
5325 */
5326 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5327 if (!device_names) {
5328 ret = -ENOMEM;
5329 goto err_alloc_device_names_failed;
5330 }
5331 strcpy(device_names, binder_devices_param);
5332
5333 while ((device_name = strsep(&device_names, ","))) {
5334 ret = init_binder_device(device_name);
5335 if (ret)
5336 goto err_init_binder_device_failed;
5337 }
5338
5339 return ret;
5340
5341err_init_binder_device_failed:
5342 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5343 misc_deregister(&device->miscdev);
5344 hlist_del(&device->hlist);
5345 kfree(device);
5346 }
5347err_alloc_device_names_failed:
5348 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5349
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005350 return ret;
5351}
5352
5353device_initcall(binder_init);
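/*
 * Usage note (an assumption about configuration outside this excerpt):
 * the comma-separated list parsed by binder_init() normally comes from
 * CONFIG_ANDROID_BINDER_DEVICES and can be overridden on the kernel
 * command line, e.g. binder.devices=binder,hwbinder,vndbinder, giving
 * one misc device node per name.  The debugfs files registered above
 * then appear under /sys/kernel/debug/binder/ (state, stats,
 * transactions, transaction_log, failed_transaction_log).
 */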
5354
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005355#define CREATE_TRACE_POINTS
5356#include "binder_trace.h"
5357
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005358MODULE_LICENSE("GPL v2");