/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo), as well as
 *    thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name (see the illustrative
 * lock-ordering sketch after the lock helpers below):
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

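/*
 * Note (added for clarity): BINDER_DEBUG_ENTRY(proc) above expands to a
 * binder_proc_open() wrapper around single_open() plus a matching
 * binder_proc_fops, wired to the binder_proc_show() callback declared
 * just before it; the resulting fops can then be handed to
 * debugfs_create_file(). Any other seq_file-backed debugfs entry in
 * this file can be declared the same way.
 */
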
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

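/*
 * Note (added for clarity): each macro above recovers the containing
 * object from a pointer to its embedded struct binder_object_header
 * via container_of(). Callers are expected to have checked hdr->type
 * first, so that the chosen cast matches the object actually sent.
 */
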
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

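/*
 * Illustrative sketch (a hypothetical helper, not part of the driver):
 * the reader side of the log ring pairs with the smp_wmb() in
 * binder_transaction_log_add(). It snapshots debug_id_done, copies the
 * entry, and re-checks debug_id_done to detect a concurrent overwrite
 * of the slot, in the same spirit as the debugfs log printing code.
 */
static bool __maybe_unused binder_transaction_log_entry_read(
	struct binder_transaction_log_entry *e,
	struct binder_transaction_log_entry *copy)
{
	int debug_id = READ_ONCE(e->debug_id_done);

	/* pairs with the smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	*copy = *e;
	smp_rmb();
	/* a zero or changed debug_id_done means the copy may be torn */
	return debug_id && debug_id == READ_ONCE(e->debug_id_done);
}
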
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:            binder_ref_data containing id, handle, and current
 *                   refcounts
 * @rb_node_desc:    node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node:    node for lookup by @node in proc's rb_tree
 * @node_entry:      list entry for node->refs list in target node
 *                   (protected by @node->lock)
 * @proc:            binder_proc containing ref
 * @node:            binder_node of target node. When cleaning up a
 *                   ref for deletion in binder_cleanup_ref, a non-NULL
 *                   @node indicates the node must be freed
 * @death:           pointer to death notification (ref_death) if requested
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 * @refs_by_node:         rbtree of refs ordered by ref->node
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @ready_threads:        number of threads waiting for proc work
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

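/*
 * Illustrative sketch (a hypothetical function, never called by the
 * driver): typical producer/consumer pairing of the worklist helpers
 * above. The proc->inner_lock taken inside the helpers is what makes
 * the list operations safe across threads.
 */
static void __maybe_unused binder_worklist_example(struct binder_proc *proc,
						   struct binder_work *work)
{
	struct binder_work *w;

	/* producer side: queue work under proc->inner_lock */
	binder_enqueue_work(proc, work, &proc->todo);

	/* consumer side: pop the head; returns NULL on an empty list */
	w = binder_dequeue_work_head(proc, &proc->todo);

	/* removing again is a safe no-op thanks to list_del_init() */
	if (w)
		binder_dequeue_work(proc, w);
}
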
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	BUG_ON(!spin_is_locked(&proc->inner_lock));

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
					struct binder_proc *proc,
					struct binder_node *new_node,
					struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	BUG_ON(!spin_is_locked(&proc->inner_lock));
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node_nilocked() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node() - get the ref associated with given node
 * @proc:       binder_proc that owns the ref
 * @node:       binder_node of target
 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different than the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node,
						  struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

Todd Kjosb0117bb2017-05-08 09:16:27 -07001344 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001345 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1346 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001347 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001348 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001349 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001350 }
1351
1352 p = &proc->refs_by_desc.rb_node;
1353 while (*p) {
1354 parent = *p;
1355 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1356
Todd Kjosb0117bb2017-05-08 09:16:27 -07001357 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001358 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001359 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001360 p = &(*p)->rb_right;
1361 else
1362 BUG();
1363 }
1364 rb_link_node(&new_ref->rb_node_desc, parent, p);
1365 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001366
1367 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001368 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001369
Todd Kjos4cbe5752017-05-01 17:21:51 -07001370 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1371 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001372 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001373 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001374 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001375 return new_ref;
1376}
1377
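/*
 * Standalone model of the descriptor-allocation loop above
 * (illustrative only): an in-order walk of refs_by_desc yields
 * ascending descriptors, so the smallest unused one is picked.
 * descs[] stands in for that walk; start_desc is 0 only for the
 * context manager node.
 */
static u32 example_pick_desc(const u32 *descs, int n, u32 start_desc)
{
	u32 desc = start_desc;
	int i;

	for (i = 0; i < n; i++) {
		if (descs[i] > desc)
			break;			/* found a gap */
		desc = descs[i] + 1;
	}
	return desc;		/* e.g. {0, 1, 2, 4} from 1 -> 3 */
}
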
Todd Kjosb0117bb2017-05-08 09:16:27 -07001378static void binder_cleanup_ref(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001379{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001380 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001381
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001382 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301383 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001384 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301385 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001386
1387 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1388 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001389
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001390 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001391 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001392 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001393
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001394 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001395 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1396 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001397 /*
1398 * Clear ref->node unless we want the caller to free the node
1399 */
1400 if (!delete_node) {
1401 /*
1402 * The caller uses ref->node to determine
1403 * whether the node needs to be freed. Clear
1404 * it since the node is still alive.
1405 */
1406 ref->node = NULL;
1407 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001408
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001409 if (ref->death) {
1410 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301411 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001412 ref->proc->pid, ref->data.debug_id,
1413 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001414 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001415 binder_stats_deleted(BINDER_STAT_DEATH);
1416 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001417 binder_stats_deleted(BINDER_STAT_REF);
1418}
1419
Todd Kjosb0117bb2017-05-08 09:16:27 -07001420/**
1421 * binder_inc_ref() - increment the given ref
1422 * @ref: ref to be incremented
1423 * @strong: if true, strong increment, else weak
1424 * @target_list: list to queue node work on
1425 *
1426 * Increment the ref.
1427 *
1428 * Return: 0, if successful, else errno
1429 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001430static int binder_inc_ref(struct binder_ref *ref, int strong,
1431 struct list_head *target_list)
1432{
1433 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001434
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001435 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001436 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001437 ret = binder_inc_node(ref->node, 1, 1, target_list);
1438 if (ret)
1439 return ret;
1440 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001441 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001442 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001443 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001444 ret = binder_inc_node(ref->node, 0, 1, target_list);
1445 if (ret)
1446 return ret;
1447 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001448 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001449 }
1450 return 0;
1451}
1452
Todd Kjosb0117bb2017-05-08 09:16:27 -07001453/**
1454 * binder_dec_ref() - decrement the given ref
1455 * @ref: ref to be decremented
1456 * @strong: if true, strong decrement, else weak
1457 *
1458 * Decrement the ref.
1459 *
1460 * TODO: kfree is avoided here since an upcoming patch
1461 * will put this under a lock.
1462 *
1463 * Return: true if ref is cleaned up and ready to be freed
1464 */
1465static bool binder_dec_ref(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001466{
1467 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001468 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301469 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001470 ref->proc->pid, ref->data.debug_id,
1471 ref->data.desc, ref->data.strong,
1472 ref->data.weak);
1473 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001474 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001475 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001476 if (ref->data.strong == 0)
1477 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001478 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001479 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301480 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001481 ref->proc->pid, ref->data.debug_id,
1482 ref->data.desc, ref->data.strong,
1483 ref->data.weak);
1484 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001485 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001486 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001487 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001488 if (ref->data.strong == 0 && ref->data.weak == 0) {
1489 binder_cleanup_ref(ref);
1490 /*
1491 * TODO: we could kfree(ref) here, but an upcoming
1492 * patch will call this with a lock held, so we
1493 * return an indication that the ref should be
1494 * freed.
1495 */
1496 return true;
1497 }
1498 return false;
1499}
1500
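/*
 * Lifecycle sketch (hypothetical; assumes @ref is registered in its
 * proc's trees): the first strong increment pins the node, and a
 * %true return from binder_dec_ref() tells the caller the ref is
 * fully cleaned up and must be freed.
 */
static bool example_ref_lifecycle(struct binder_ref *ref)
{
	if (binder_inc_ref(ref, 1, NULL))	/* strong 0 -> 1 */
		return false;
	/* strong 1 -> 0; true once the weak count is also 0 */
	return binder_dec_ref(ref, 1);
}
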
1501/**
1502 * binder_get_node_from_ref() - get the node from the given proc/desc
1503 * @proc: proc containing the ref
1504 * @desc: the handle associated with the ref
1505 * @need_strong_ref: if true, only return node if ref is strong
1506 * @rdata: the id/refcount data for the ref
1507 *
1508 * Given a proc and ref handle, return the associated binder_node
1509 *
1510 * Return: the binder_node, or NULL if the ref was not found or is only weak when a strong ref is required
1511 */
1512static struct binder_node *binder_get_node_from_ref(
1513 struct binder_proc *proc,
1514 u32 desc, bool need_strong_ref,
1515 struct binder_ref_data *rdata)
1516{
1517 struct binder_node *node;
1518 struct binder_ref *ref;
1519
1520 ref = binder_get_ref(proc, desc, need_strong_ref);
1521 if (!ref)
1522 goto err_no_ref;
1523 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001524 /*
1525 * Take an implicit reference on the node to ensure
1526 * it stays alive until the call to binder_put_node()
1527 */
1528 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001529 if (rdata)
1530 *rdata = ref->data;
1531
1532 return node;
1533
1534err_no_ref:
1535 return NULL;
1536}
1537
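/*
 * tmpref-pattern sketch (hypothetical caller): every successful call
 * must be paired with binder_put_node() to drop the implicit node
 * reference taken above.
 */
static bool example_handle_alive(struct binder_proc *proc, u32 desc)
{
	struct binder_ref_data rdata;
	struct binder_node *node;

	node = binder_get_node_from_ref(proc, desc, true, &rdata);
	if (!node)
		return false;
	/* ... node cannot be freed while the tmpref is held ... */
	binder_put_node(node);
	return true;
}
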
1538/**
1539 * binder_free_ref() - free the binder_ref
1540 * @ref: ref to free
1541 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001542 * Free the binder_ref. Free the binder_node indicated by ref->node
1543 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001544 */
1545static void binder_free_ref(struct binder_ref *ref)
1546{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001547 if (ref->node)
1548 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001549 kfree(ref->death);
1550 kfree(ref);
1551}
1552
1553/**
1554 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1555 * @proc: proc containing the ref
1556 * @desc: the handle associated with the ref
1557 * @increment: true=inc reference, false=dec reference
1558 * @strong: true=strong reference, false=weak reference
1559 * @rdata: the id/refcount data for the ref
1560 *
1561 * Given a proc and ref handle, increment or decrement the ref
1562 * according to "increment" arg.
1563 *
1564 * Return: 0 if successful, else errno
1565 */
1566static int binder_update_ref_for_handle(struct binder_proc *proc,
1567 uint32_t desc, bool increment, bool strong,
1568 struct binder_ref_data *rdata)
1569{
1570 int ret = 0;
1571 struct binder_ref *ref;
1572 bool delete_ref = false;
1573
1574 ref = binder_get_ref(proc, desc, strong);
1575 if (!ref) {
1576 ret = -EINVAL;
1577 goto err_no_ref;
1578 }
1579 if (increment)
1580 ret = binder_inc_ref(ref, strong, NULL);
1581 else
1582 delete_ref = binder_dec_ref(ref, strong);
1583
1584 if (rdata)
1585 *rdata = ref->data;
1586
1587 if (delete_ref)
1588 binder_free_ref(ref);
1589 return ret;
1590
1591err_no_ref:
1592 return ret;
1593}
1594
1595/**
1596 * binder_dec_ref_for_handle() - dec the ref for given handle
1597 * @proc: proc containing the ref
1598 * @desc: the handle associated with the ref
1599 * @strong: true=strong reference, false=weak reference
1600 * @rdata: the id/refcount data for the ref
1601 *
1602 * Just calls binder_update_ref_for_handle() to decrement the ref.
1603 *
1604 * Return: 0 if successful, else errno
1605 */
1606static int binder_dec_ref_for_handle(struct binder_proc *proc,
1607 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1608{
1609 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1610}
1611
1613/**
1614 * binder_inc_ref_for_node() - increment the ref for given proc/node
1615 * @proc: proc containing the ref
1616 * @node: target node
1617 * @strong: true=strong reference, false=weak reference
1618 * @target_list: worklist to use if node is incremented
1619 * @rdata: the id/refcount data for the ref
1620 *
1621 * Given a proc and node, increment the ref. Create the ref if it
1622 * doesn't already exist.
1623 *
1624 * Return: 0 if successful, else errno
1625 */
1626static int binder_inc_ref_for_node(struct binder_proc *proc,
1627 struct binder_node *node,
1628 bool strong,
1629 struct list_head *target_list,
1630 struct binder_ref_data *rdata)
1631{
1632 struct binder_ref *ref;
1633 struct binder_ref *new_ref = NULL;
1634 int ret = 0;
1635
1636 ref = binder_get_ref_for_node(proc, node, NULL);
1637 if (!ref) {
1638 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1639 if (!new_ref)
1640 return -ENOMEM;
1641 ref = binder_get_ref_for_node(proc, node, new_ref);
1642 }
1643 ret = binder_inc_ref(ref, strong, target_list);
1644 *rdata = ref->data;
1645 if (new_ref && ref != new_ref)
1646 /*
1647 * Another thread created the ref first so
1648 * free the one we allocated
1649 */
1650 kfree(new_ref);
1651 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001652}
1653
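/*
 * Usage sketch (hypothetical): granting @target_proc a strong handle
 * on @node; rdata.desc is the descriptor handed back to userspace.
 */
static int example_grant_handle(struct binder_proc *target_proc,
				struct binder_node *node, u32 *descp)
{
	struct binder_ref_data rdata;
	int ret;

	ret = binder_inc_ref_for_node(target_proc, node, true, NULL, &rdata);
	if (ret)
		return ret;
	*descp = rdata.desc;
	return 0;
}
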
Martijn Coenen995a36e2017-06-02 13:36:52 -07001654static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1655 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001656{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001657 BUG_ON(!target_thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07001658 BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
Todd Kjos21ef40a2017-03-30 18:02:13 -07001659 BUG_ON(target_thread->transaction_stack != t);
1660 BUG_ON(target_thread->transaction_stack->from != target_thread);
1661 target_thread->transaction_stack =
1662 target_thread->transaction_stack->from_parent;
1663 t->from = NULL;
1664}
1665
Todd Kjos2f993e22017-05-12 14:42:55 -07001666/**
1667 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1668 * @thread: thread to decrement
1669 *
1670 * A thread needs to be kept alive while being used to create or
1671 * handle a transaction. binder_get_txn_from() is used to safely
1672 * extract t->from from a binder_transaction and keep the thread
1673 * indicated by t->from from being freed. When done with that
1674 * binder_thread, this function is called to decrement the
1675 * tmp_ref and free if appropriate (thread has been released
1676 * and no transaction being processed by the driver)
1677 */
1678static void binder_thread_dec_tmpref(struct binder_thread *thread)
1679{
1680 /*
1681 * The atomic protects the counter value during the window
1682 * where it cannot reach zero or thread->is_dead is false.
Todd Kjos2f993e22017-05-12 14:42:55 -07001683 */
Todd Kjosb4827902017-05-25 15:52:17 -07001684 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001685 atomic_dec(&thread->tmp_ref);
1686 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07001687 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001688 binder_free_thread(thread);
1689 return;
1690 }
Todd Kjosb4827902017-05-25 15:52:17 -07001691 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001692}
1693
1694/**
1695 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1696 * @proc: proc to decrement
1697 *
1698 * A binder_proc needs to be kept alive while being used to create or
1699 * handle a transaction. proc->tmp_ref is incremented when
1700 * creating a new transaction or the binder_proc is currently in-use
1701 * by threads that are being released. When done with the binder_proc,
1702 * this function is called to decrement the counter and free the
1703 * proc if appropriate (proc has been released, all threads have
1704 * been released and the proc is not currently in use to process a transaction).
1705 */
1706static void binder_proc_dec_tmpref(struct binder_proc *proc)
1707{
Todd Kjosb4827902017-05-25 15:52:17 -07001708 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001709 proc->tmp_ref--;
1710 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1711 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07001712 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001713 binder_free_proc(proc);
1714 return;
1715 }
Todd Kjosb4827902017-05-25 15:52:17 -07001716 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001717}
1718
1719/**
1720 * binder_get_txn_from() - safely extract the "from" thread in transaction
1721 * @t: binder transaction for t->from
1722 *
1723 * Atomically return the "from" thread and increment the tmp_ref
1724 * count for the thread to ensure it stays alive until
1725 * binder_thread_dec_tmpref() is called.
1726 *
1727 * Return: the value of t->from
1728 */
1729static struct binder_thread *binder_get_txn_from(
1730 struct binder_transaction *t)
1731{
1732 struct binder_thread *from;
1733
1734 spin_lock(&t->lock);
1735 from = t->from;
1736 if (from)
1737 atomic_inc(&from->tmp_ref);
1738 spin_unlock(&t->lock);
1739 return from;
1740}
1741
Martijn Coenen995a36e2017-06-02 13:36:52 -07001742/**
1743 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1744 * @t: binder transaction for t->from
1745 *
1746 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1747 * to guarantee that the thread cannot be released while operating on it.
1748 * The caller must call binder_inner_proc_unlock() to release the inner lock
1749 * as well as call binder_thread_dec_tmpref() to release the reference.
1750 *
1751 * Return: the value of t->from
1752 */
1753static struct binder_thread *binder_get_txn_from_and_acq_inner(
1754 struct binder_transaction *t)
1755{
1756 struct binder_thread *from;
1757
1758 from = binder_get_txn_from(t);
1759 if (!from)
1760 return NULL;
1761 binder_inner_proc_lock(from->proc);
1762 if (t->from) {
1763 BUG_ON(from != t->from);
1764 return from;
1765 }
1766 binder_inner_proc_unlock(from->proc);
1767 binder_thread_dec_tmpref(from);
1768 return NULL;
1769}
1770
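/*
 * Pairing sketch (hypothetical caller): both the inner lock and the
 * thread tmpref taken by the helper above must be released, in this
 * order, exactly as binder_send_failed_reply() does below.
 */
static void example_use_sender(struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from_and_acq_inner(t);
	if (!from)
		return;		/* sending thread already gone */
	/* ... inspect from->transaction_stack under the inner lock ... */
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}
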
Todd Kjos21ef40a2017-03-30 18:02:13 -07001771static void binder_free_transaction(struct binder_transaction *t)
1772{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001773 if (t->buffer)
1774 t->buffer->transaction = NULL;
1775 kfree(t);
1776 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1777}
1778
1779static void binder_send_failed_reply(struct binder_transaction *t,
1780 uint32_t error_code)
1781{
1782 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001783 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09001784
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001785 BUG_ON(t->flags & TF_ONE_WAY);
1786 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07001787 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001788 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07001789 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1790 "send failed reply for transaction %d to %d:%d\n",
1791 t->debug_id,
1792 target_thread->proc->pid,
1793 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001794
Martijn Coenen995a36e2017-06-02 13:36:52 -07001795 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07001796 if (target_thread->reply_error.cmd == BR_OK) {
1797 target_thread->reply_error.cmd = error_code;
Martijn Coenen995a36e2017-06-02 13:36:52 -07001798 binder_enqueue_work_ilocked(
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001799 &target_thread->reply_error.work,
Todd Kjos858b8da2017-04-21 17:35:12 -07001800 &target_thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001801 wake_up_interruptible(&target_thread->wait);
1802 } else {
Todd Kjos858b8da2017-04-21 17:35:12 -07001803 WARN(1, "Unexpected reply error: %u\n",
1804 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001805 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07001806 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001807 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07001808 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001809 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001810 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001811 next = t->from_parent;
1812
1813 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1814 "send failed reply for transaction %d, target dead\n",
1815 t->debug_id);
1816
Todd Kjos21ef40a2017-03-30 18:02:13 -07001817 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001818 if (next == NULL) {
1819 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1820 "reply failed, no target thread at root\n");
1821 return;
1822 }
1823 t = next;
1824 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1825 "reply failed, no target thread -- retry %d\n",
1826 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001827 }
1828}
1829
Martijn Coenen00c80372016-07-13 12:06:49 +02001830/**
1831 * binder_validate_object() - checks for a valid metadata object in a buffer.
1832 * @buffer: binder_buffer that we're parsing.
1833 * @offset: offset in the buffer at which to validate an object.
1834 *
1835 * Return: If there's a valid metadata object at @offset in @buffer, the
1836 * size of that object. Otherwise, it returns zero.
1837 */
1838static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1839{
1840 /* Check if we can read a header first */
1841 struct binder_object_header *hdr;
1842 size_t object_size = 0;
1843
1844 if (offset > buffer->data_size - sizeof(*hdr) ||
1845 buffer->data_size < sizeof(*hdr) ||
1846 !IS_ALIGNED(offset, sizeof(u32)))
1847 return 0;
1848
1849 /* Ok, now see if we can read a complete object. */
1850 hdr = (struct binder_object_header *)(buffer->data + offset);
1851 switch (hdr->type) {
1852 case BINDER_TYPE_BINDER:
1853 case BINDER_TYPE_WEAK_BINDER:
1854 case BINDER_TYPE_HANDLE:
1855 case BINDER_TYPE_WEAK_HANDLE:
1856 object_size = sizeof(struct flat_binder_object);
1857 break;
1858 case BINDER_TYPE_FD:
1859 object_size = sizeof(struct binder_fd_object);
1860 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02001861 case BINDER_TYPE_PTR:
1862 object_size = sizeof(struct binder_buffer_object);
1863 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02001864 case BINDER_TYPE_FDA:
1865 object_size = sizeof(struct binder_fd_array_object);
1866 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02001867 default:
1868 return 0;
1869 }
1870 if (offset <= buffer->data_size - object_size &&
1871 buffer->data_size >= object_size)
1872 return object_size;
1873 else
1874 return 0;
1875}
1876
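/*
 * Validation-loop sketch (hypothetical): how a caller walks the
 * offset array and rejects a buffer at the first invalid object;
 * binder_transaction_buffer_release() below iterates the same way.
 */
static bool example_validate_all(struct binder_buffer *buffer,
				 binder_size_t *offp, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		if (binder_validate_object(buffer, offp[i]) == 0)
			return false;	/* bad offset, alignment, or type */
	return true;
}
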
Martijn Coenen5a6da532016-09-30 14:10:07 +02001877/**
1878 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1879 * @b: binder_buffer containing the object
1880 * @index: index in offset array at which the binder_buffer_object is
1881 * located
1882 * @start: points to the start of the offset array
1883 * @num_valid: the number of valid offsets in the offset array
1884 *
1885 * Return: If @index is within the valid range of the offset array
1886 * described by @start and @num_valid, and if there's a valid
1887 * binder_buffer_object at the offset found in index @index
1888 * of the offset array, that object is returned. Otherwise,
1889 * %NULL is returned.
1890 * Note that the offset found in index @index itself is not
1891 * verified; this function assumes that @num_valid elements
1892 * from @start were previously verified to have valid offsets.
1893 */
1894static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1895 binder_size_t index,
1896 binder_size_t *start,
1897 binder_size_t num_valid)
1898{
1899 struct binder_buffer_object *buffer_obj;
1900 binder_size_t *offp;
1901
1902 if (index >= num_valid)
1903 return NULL;
1904
1905 offp = start + index;
1906 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1907 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1908 return NULL;
1909
1910 return buffer_obj;
1911}
1912
1913/**
1914 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1915 * @b: transaction buffer
1916 * @objects_start: start of objects buffer
1917 * @buffer: binder_buffer_object in which to fix up
1918 * @offset: start offset in @buffer to fix up
1919 * @last_obj: last binder_buffer_object that we fixed up in
1920 * @last_min_offset: minimum fixup offset in @last_obj
1921 *
1922 * Return: %true if a fixup in buffer @buffer at offset @offset is
1923 * allowed.
1924 *
1925 * For safety reasons, we only allow fixups inside a buffer to happen
1926 * at increasing offsets; additionally, we only allow fixup on the last
1927 * buffer object that was verified, or one of its parents.
1928 *
1929 * Example of what is allowed:
1930 *
1931 * A
1932 * B (parent = A, offset = 0)
1933 * C (parent = A, offset = 16)
1934 * D (parent = C, offset = 0)
1935 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1936 *
1937 * Examples of what is not allowed:
1938 *
1939 * Decreasing offsets within the same parent:
1940 * A
1941 * C (parent = A, offset = 16)
1942 * B (parent = A, offset = 0) // decreasing offset within A
1943 *
1944 * Referring to a parent that wasn't the last object or any of its parents:
1945 * A
1946 * B (parent = A, offset = 0)
1947 * C (parent = A, offset = 0)
1948 * C (parent = A, offset = 16)
1949 * D (parent = B, offset = 0) // B is not A or any of A's parents
1950 */
1951static bool binder_validate_fixup(struct binder_buffer *b,
1952 binder_size_t *objects_start,
1953 struct binder_buffer_object *buffer,
1954 binder_size_t fixup_offset,
1955 struct binder_buffer_object *last_obj,
1956 binder_size_t last_min_offset)
1957{
1958 if (!last_obj) {
1959 /* No previous object to fix up in */
1960 return false;
1961 }
1962
1963 while (last_obj != buffer) {
1964 /*
1965 * Safe to retrieve the parent of last_obj, since it
1966 * was already previously verified by the driver.
1967 */
1968 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1969 return false;
1970 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1971 last_obj = (struct binder_buffer_object *)
1972 (b->data + *(objects_start + last_obj->parent));
1973 }
1974 return (fixup_offset >= last_min_offset);
1975}
1976
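/*
 * Worked trace of the "allowed" example above (assuming 64-bit, so
 * sizeof(uintptr_t) == 8): validating E's fixup into A at offset 32
 * with last_obj == D climbs D -> C (min offset D.parent_offset + 8
 * == 8), then C -> A (min offset 16 + 8 == 24); since 32 >= 24 the
 * fixup is accepted.
 */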
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001977static void binder_transaction_buffer_release(struct binder_proc *proc,
1978 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001979 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001980{
Martijn Coenen5a6da532016-09-30 14:10:07 +02001981 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001982 int debug_id = buffer->debug_id;
1983
1984 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301985 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001986 proc->pid, buffer->debug_id,
1987 buffer->data_size, buffer->offsets_size, failed_at);
1988
1989 if (buffer->target_node)
1990 binder_dec_node(buffer->target_node, 1, 0);
1991
Martijn Coenen5a6da532016-09-30 14:10:07 +02001992 off_start = (binder_size_t *)(buffer->data +
1993 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001994 if (failed_at)
1995 off_end = failed_at;
1996 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02001997 off_end = (void *)off_start + buffer->offsets_size;
1998 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02001999 struct binder_object_header *hdr;
2000 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002001
Martijn Coenen00c80372016-07-13 12:06:49 +02002002 if (object_size == 0) {
2003 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002004 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002005 continue;
2006 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002007 hdr = (struct binder_object_header *)(buffer->data + *offp);
2008 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002009 case BINDER_TYPE_BINDER:
2010 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002011 struct flat_binder_object *fp;
2012 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002013
Martijn Coenen00c80372016-07-13 12:06:49 +02002014 fp = to_flat_binder_object(hdr);
2015 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002016 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002017 pr_err("transaction release %d bad node %016llx\n",
2018 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002019 break;
2020 }
2021 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002022 " node %d u%016llx\n",
2023 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002024 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2025 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002026 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002027 } break;
2028 case BINDER_TYPE_HANDLE:
2029 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002030 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002031 struct binder_ref_data rdata;
2032 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002033
Martijn Coenen00c80372016-07-13 12:06:49 +02002034 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002035 ret = binder_dec_ref_for_handle(proc, fp->handle,
2036 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2037
2038 if (ret) {
2039 pr_err("transaction release %d bad handle %d, ret = %d\n",
2040 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002041 break;
2042 }
2043 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002044 " ref %d desc %d\n",
2045 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002046 } break;
2047
Martijn Coenen00c80372016-07-13 12:06:49 +02002048 case BINDER_TYPE_FD: {
2049 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2050
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002051 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002052 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002053 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002054 task_close_fd(proc, fp->fd);
2055 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002056 case BINDER_TYPE_PTR:
2057 /*
2058 * Nothing to do here, this will get cleaned up when the
2059 * transaction buffer gets freed
2060 */
2061 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002062 case BINDER_TYPE_FDA: {
2063 struct binder_fd_array_object *fda;
2064 struct binder_buffer_object *parent;
2065 uintptr_t parent_buffer;
2066 u32 *fd_array;
2067 size_t fd_index;
2068 binder_size_t fd_buf_size;
2069
2070 fda = to_binder_fd_array_object(hdr);
2071 parent = binder_validate_ptr(buffer, fda->parent,
2072 off_start,
2073 offp - off_start);
2074 if (!parent) {
2075 pr_err("transaction release %d bad parent offset",
2076 debug_id);
2077 continue;
2078 }
2079 /*
2080 * Since the parent was already fixed up, convert it
2081 * back to kernel address space to access it
2082 */
2083 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002084 binder_alloc_get_user_buffer_offset(
2085 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002086
2087 fd_buf_size = sizeof(u32) * fda->num_fds;
2088 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2089 pr_err("transaction release %d invalid number of fds (%lld)\n",
2090 debug_id, (u64)fda->num_fds);
2091 continue;
2092 }
2093 if (fd_buf_size > parent->length ||
2094 fda->parent_offset > parent->length - fd_buf_size) {
2095 /* No space for all file descriptors here. */
2096 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2097 debug_id, (u64)fda->num_fds);
2098 continue;
2099 }
2100 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2101 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2102 task_close_fd(proc, fd_array[fd_index]);
2103 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002104 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002105 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002106 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002107 break;
2108 }
2109 }
2110}
2111
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002112static int binder_translate_binder(struct flat_binder_object *fp,
2113 struct binder_transaction *t,
2114 struct binder_thread *thread)
2115{
2116 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002117 struct binder_proc *proc = thread->proc;
2118 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002119 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002120 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002121
2122 node = binder_get_node(proc, fp->binder);
2123 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002124 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002125 if (!node)
2126 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002127 }
2128 if (fp->cookie != node->cookie) {
2129 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2130 proc->pid, thread->pid, (u64)fp->binder,
2131 node->debug_id, (u64)fp->cookie,
2132 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002133 ret = -EINVAL;
2134 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002135 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002136 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2137 ret = -EPERM;
2138 goto done;
2139 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002140
Todd Kjosb0117bb2017-05-08 09:16:27 -07002141 ret = binder_inc_ref_for_node(target_proc, node,
2142 fp->hdr.type == BINDER_TYPE_BINDER,
2143 &thread->todo, &rdata);
2144 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002145 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002146
2147 if (fp->hdr.type == BINDER_TYPE_BINDER)
2148 fp->hdr.type = BINDER_TYPE_HANDLE;
2149 else
2150 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2151 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002152 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002153 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002154
Todd Kjosb0117bb2017-05-08 09:16:27 -07002155 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002156 binder_debug(BINDER_DEBUG_TRANSACTION,
2157 " node %d u%016llx -> ref %d desc %d\n",
2158 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002159 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002160done:
2161 binder_put_node(node);
2162 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002163}
2164
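/*
 * Net effect (sketch): a BINDER_TYPE_(WEAK_)BINDER object sent by the
 * node's owner arrives in the target as a (WEAK_)HANDLE whose
 * fp->handle is the target-local descriptor rdata.desc; the sender's
 * kernel-visible pointers are not leaked, since fp->binder and
 * fp->cookie are scrubbed to 0.
 */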
2165static int binder_translate_handle(struct flat_binder_object *fp,
2166 struct binder_transaction *t,
2167 struct binder_thread *thread)
2168{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002169 struct binder_proc *proc = thread->proc;
2170 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002171 struct binder_node *node;
2172 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002173 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002174
Todd Kjosb0117bb2017-05-08 09:16:27 -07002175 node = binder_get_node_from_ref(proc, fp->handle,
2176 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2177 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002178 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2179 proc->pid, thread->pid, fp->handle);
2180 return -EINVAL;
2181 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002182 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2183 ret = -EPERM;
2184 goto done;
2185 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002186
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002187 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002188 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002189 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2190 fp->hdr.type = BINDER_TYPE_BINDER;
2191 else
2192 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002193 fp->binder = node->ptr;
2194 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002195 if (node->proc)
2196 binder_inner_proc_lock(node->proc);
2197 binder_inc_node_nilocked(node,
2198 fp->hdr.type == BINDER_TYPE_BINDER,
2199 0, NULL);
2200 if (node->proc)
2201 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002202 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002203 binder_debug(BINDER_DEBUG_TRANSACTION,
2204 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002205 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2206 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002207 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002208 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002209 int ret;
2210 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002211
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002212 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002213 ret = binder_inc_ref_for_node(target_proc, node,
2214 fp->hdr.type == BINDER_TYPE_HANDLE,
2215 NULL, &dest_rdata);
2216 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002217 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002218
2219 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002220 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002221 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002222 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2223 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002224 binder_debug(BINDER_DEBUG_TRANSACTION,
2225 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002226 src_rdata.debug_id, src_rdata.desc,
2227 dest_rdata.debug_id, dest_rdata.desc,
2228 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002229 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002230done:
2231 binder_put_node(node);
2232 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002233}
2234
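/*
 * Summary (sketch): a handle that refers back to a node owned by the
 * target collapses into a direct BINDER_TYPE_(WEAK_)BINDER object;
 * any other handle is re-minted as a new ref in the target and
 * rewritten to the target-local descriptor dest_rdata.desc.
 */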
2235static int binder_translate_fd(int fd,
2236 struct binder_transaction *t,
2237 struct binder_thread *thread,
2238 struct binder_transaction *in_reply_to)
2239{
2240 struct binder_proc *proc = thread->proc;
2241 struct binder_proc *target_proc = t->to_proc;
2242 int target_fd;
2243 struct file *file;
2244 int ret;
2245 bool target_allows_fd;
2246
2247 if (in_reply_to)
2248 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2249 else
2250 target_allows_fd = t->buffer->target_node->accept_fds;
2251 if (!target_allows_fd) {
2252 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2253 proc->pid, thread->pid,
2254 in_reply_to ? "reply" : "transaction",
2255 fd);
2256 ret = -EPERM;
2257 goto err_fd_not_accepted;
2258 }
2259
2260 file = fget(fd);
2261 if (!file) {
2262 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2263 proc->pid, thread->pid, fd);
2264 ret = -EBADF;
2265 goto err_fget;
2266 }
2267 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2268 if (ret < 0) {
2269 ret = -EPERM;
2270 goto err_security;
2271 }
2272
2273 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2274 if (target_fd < 0) {
2275 ret = -ENOMEM;
2276 goto err_get_unused_fd;
2277 }
2278 task_fd_install(target_proc, target_fd, file);
2279 trace_binder_transaction_fd(t, fd, target_fd);
2280 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2281 fd, target_fd);
2282
2283 return target_fd;
2284
2285err_get_unused_fd:
2286err_security:
2287 fput(file);
2288err_fget:
2289err_fd_not_accepted:
2290 return ret;
2291}
2292
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002293static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2294 struct binder_buffer_object *parent,
2295 struct binder_transaction *t,
2296 struct binder_thread *thread,
2297 struct binder_transaction *in_reply_to)
2298{
2299 binder_size_t fdi, fd_buf_size, num_installed_fds;
2300 int target_fd;
2301 uintptr_t parent_buffer;
2302 u32 *fd_array;
2303 struct binder_proc *proc = thread->proc;
2304 struct binder_proc *target_proc = t->to_proc;
2305
2306 fd_buf_size = sizeof(u32) * fda->num_fds;
2307 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2308 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2309 proc->pid, thread->pid, (u64)fda->num_fds);
2310 return -EINVAL;
2311 }
2312 if (fd_buf_size > parent->length ||
2313 fda->parent_offset > parent->length - fd_buf_size) {
2314 /* No space for all file descriptors here. */
2315 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2316 proc->pid, thread->pid, (u64)fda->num_fds);
2317 return -EINVAL;
2318 }
2319 /*
2320 * Since the parent was already fixed up, convert it
2321 * back to the kernel address space to access it
2322 */
Todd Kjosd325d372016-10-10 10:40:53 -07002323 parent_buffer = parent->buffer -
2324 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002325 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2326 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2327 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2328 proc->pid, thread->pid);
2329 return -EINVAL;
2330 }
2331 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2332 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2333 in_reply_to);
2334 if (target_fd < 0)
2335 goto err_translate_fd_failed;
2336 fd_array[fdi] = target_fd;
2337 }
2338 return 0;
2339
2340err_translate_fd_failed:
2341 /*
2342 * Failed to allocate fd or security error, free fds
2343 * installed so far.
2344 */
2345 num_installed_fds = fdi;
2346 for (fdi = 0; fdi < num_installed_fds; fdi++)
2347 task_close_fd(target_proc, fd_array[fdi]);
2348 return target_fd;
2349}
2350
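/*
 * Failure handling above (sketch): if fd number fdi fails to
 * translate, fds 0..fdi-1 were already installed in the target, so
 * they are closed again via task_close_fd() before the error from
 * binder_translate_fd() is propagated.
 */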
Martijn Coenen5a6da532016-09-30 14:10:07 +02002351static int binder_fixup_parent(struct binder_transaction *t,
2352 struct binder_thread *thread,
2353 struct binder_buffer_object *bp,
2354 binder_size_t *off_start,
2355 binder_size_t num_valid,
2356 struct binder_buffer_object *last_fixup_obj,
2357 binder_size_t last_fixup_min_off)
2358{
2359 struct binder_buffer_object *parent;
2360 u8 *parent_buffer;
2361 struct binder_buffer *b = t->buffer;
2362 struct binder_proc *proc = thread->proc;
2363 struct binder_proc *target_proc = t->to_proc;
2364
2365 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2366 return 0;
2367
2368 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2369 if (!parent) {
2370 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2371 proc->pid, thread->pid);
2372 return -EINVAL;
2373 }
2374
2375 if (!binder_validate_fixup(b, off_start,
2376 parent, bp->parent_offset,
2377 last_fixup_obj,
2378 last_fixup_min_off)) {
2379 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2380 proc->pid, thread->pid);
2381 return -EINVAL;
2382 }
2383
2384 if (parent->length < sizeof(binder_uintptr_t) ||
2385 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2386 /* No space for a pointer here! */
2387 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2388 proc->pid, thread->pid);
2389 return -EINVAL;
2390 }
2391 parent_buffer = (u8 *)(parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002392 binder_alloc_get_user_buffer_offset(
2393 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002394 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2395
2396 return 0;
2397}
2398
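/*
 * Sketch of the write above: parent->buffer already holds the
 * target-side user address, so it is converted back through the
 * kernel mapping and the child's fixed-up user address bp->buffer
 * is stored at bp->parent_offset inside the parent.
 */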
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002399static void binder_transaction(struct binder_proc *proc,
2400 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002401 struct binder_transaction_data *tr, int reply,
2402 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002403{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002404 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002405 struct binder_transaction *t;
2406 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002407 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002408 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002409 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002410 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002411 struct binder_thread *target_thread = NULL;
2412 struct binder_node *target_node = NULL;
2413 struct list_head *target_list;
2414 wait_queue_head_t *target_wait;
2415 struct binder_transaction *in_reply_to = NULL;
2416 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002417 uint32_t return_error = 0;
2418 uint32_t return_error_param = 0;
2419 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002420 struct binder_buffer_object *last_fixup_obj = NULL;
2421 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002422 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002423 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002424
2425 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002426 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002427 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2428 e->from_proc = proc->pid;
2429 e->from_thread = thread->pid;
2430 e->target_handle = tr->target.handle;
2431 e->data_size = tr->data_size;
2432 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002433 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002434
2435 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002436 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002437 in_reply_to = thread->transaction_stack;
2438 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002439 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302440 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002441 proc->pid, thread->pid);
2442 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002443 return_error_param = -EPROTO;
2444 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002445 goto err_empty_call_stack;
2446 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002447 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002448 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302449 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002450 proc->pid, thread->pid, in_reply_to->debug_id,
2451 in_reply_to->to_proc ?
2452 in_reply_to->to_proc->pid : 0,
2453 in_reply_to->to_thread ?
2454 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002455 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002456 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002457 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002458 return_error_param = -EPROTO;
2459 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002460 in_reply_to = NULL;
2461 goto err_bad_call_stack;
2462 }
2463 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002464 binder_inner_proc_unlock(proc);
2465 binder_set_nice(in_reply_to->saved_priority);
2466 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002467 if (target_thread == NULL) {
2468 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002469 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002470 goto err_dead_binder;
2471 }
2472 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302473 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002474 proc->pid, thread->pid,
2475 target_thread->transaction_stack ?
2476 target_thread->transaction_stack->debug_id : 0,
2477 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002478 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002479 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002480 return_error_param = -EPROTO;
2481 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002482 in_reply_to = NULL;
2483 target_thread = NULL;
2484 goto err_dead_binder;
2485 }
2486 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002487 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002488 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002489 } else {
2490 if (tr->target.handle) {
2491 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002492
Todd Kjosc37162d2017-05-26 11:56:29 -07002493 /*
2494 * A strong ref is required
2495 * on this node; if one exists, do a strong
2496 * increment on the node to ensure it
2497 * stays alive until the transaction is
2498 * done.
2499 */
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002500 ref = binder_get_ref(proc, tr->target.handle, true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002501 if (ref) {
2502 binder_inc_node(ref->node, 1, 0, NULL);
2503 target_node = ref->node;
2504 }
2505 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302506 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002507 proc->pid, thread->pid);
2508 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002509 return_error_param = -EINVAL;
2510 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002511 goto err_invalid_target_handle;
2512 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002513 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002514 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002515 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002516 if (target_node == NULL) {
2517 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002518 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjose598d172017-03-22 17:19:52 -07002519 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002520 goto err_no_context_mgr_node;
2521 }
Todd Kjosc37162d2017-05-26 11:56:29 -07002522 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002523 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002524 }
2525 e->to_node = target_node->debug_id;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002526 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002527 target_proc = target_node->proc;
2528 if (target_proc == NULL) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002529 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002530 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002531 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002532 goto err_dead_binder;
2533 }
Todd Kjosb4827902017-05-25 15:52:17 -07002534 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002535 target_proc->tmp_ref++;
Todd Kjosb4827902017-05-25 15:52:17 -07002536 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002537 binder_node_unlock(target_node);
Stephen Smalley79af7302015-01-21 10:54:10 -05002538 if (security_binder_transaction(proc->tsk,
2539 target_proc->tsk) < 0) {
2540 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002541 return_error_param = -EPERM;
2542 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05002543 goto err_invalid_target_handle;
2544 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002545 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002546 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2547 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002548
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002549 tmp = thread->transaction_stack;
2550 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002551 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302552 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002553 proc->pid, thread->pid, tmp->debug_id,
2554 tmp->to_proc ? tmp->to_proc->pid : 0,
2555 tmp->to_thread ?
2556 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002557 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002558 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002559 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002560 return_error_param = -EPROTO;
2561 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002562 goto err_bad_call_stack;
2563 }
2564 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002565 struct binder_thread *from;
2566
2567 spin_lock(&tmp->lock);
2568 from = tmp->from;
2569 if (from && from->proc == target_proc) {
2570 atomic_inc(&from->tmp_ref);
2571 target_thread = from;
2572 spin_unlock(&tmp->lock);
2573 break;
2574 }
2575 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002576 tmp = tmp->from_parent;
2577 }
2578 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002579 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002580 }
2581 if (target_thread) {
2582 e->to_thread = target_thread->pid;
2583 target_list = &target_thread->todo;
2584 target_wait = &target_thread->wait;
2585 } else {
2586 target_list = &target_proc->todo;
2587 target_wait = &target_proc->wait;
2588 }
2589 e->to_proc = target_proc->pid;
2590
2591 /* TODO: reuse incoming transaction for reply */
2592 t = kzalloc(sizeof(*t), GFP_KERNEL);
2593 if (t == NULL) {
2594 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002595 return_error_param = -ENOMEM;
2596 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002597 goto err_alloc_t_failed;
2598 }
2599 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07002600 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002601
2602 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2603 if (tcomplete == NULL) {
2604 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002605 return_error_param = -ENOMEM;
2606 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002607 goto err_alloc_tcomplete_failed;
2608 }
2609 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2610
Todd Kjos1cfe6272017-05-24 13:33:28 -07002611 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002612
2613 if (reply)
2614 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002615 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002616 proc->pid, thread->pid, t->debug_id,
2617 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002618 (u64)tr->data.ptr.buffer,
2619 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002620 (u64)tr->data_size, (u64)tr->offsets_size,
2621 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002622 else
2623 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002624 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002625 proc->pid, thread->pid, t->debug_id,
2626 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002627 (u64)tr->data.ptr.buffer,
2628 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002629 (u64)tr->data_size, (u64)tr->offsets_size,
2630 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002631
2632 if (!reply && !(tr->flags & TF_ONE_WAY))
2633 t->from = thread;
2634 else
2635 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03002636 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002637 t->to_proc = target_proc;
2638 t->to_thread = target_thread;
2639 t->code = tr->code;
2640 t->flags = tr->flags;
2641 t->priority = task_nice(current);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002642
2643 trace_binder_transaction(reply, t, target_node);
2644
Todd Kjosd325d372016-10-10 10:40:53 -07002645 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02002646 tr->offsets_size, extra_buffers_size,
2647 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07002648 if (IS_ERR(t->buffer)) {
2649 /*
2650 * -ESRCH indicates VMA cleared. The target is dying.
2651 */
2652 return_error_param = PTR_ERR(t->buffer);
2653 return_error = return_error_param == -ESRCH ?
2654 BR_DEAD_REPLY : BR_FAILED_REPLY;
2655 return_error_line = __LINE__;
2656 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002657 goto err_binder_alloc_buf_failed;
2658 }
2659 t->buffer->allow_user_free = 0;
2660 t->buffer->debug_id = t->debug_id;
2661 t->buffer->transaction = t;
2662 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002663 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002664 off_start = (binder_size_t *)(t->buffer->data +
2665 ALIGN(tr->data_size, sizeof(void *)));
2666 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002667
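	/*
	 * Editor's note (illustrative sketch, not original driver text):
	 * the buffer allocated above is carved into three pointer-aligned
	 * regions, filled in below in this order:
	 *
	 *   t->buffer->data
	 *   +------------------+--------------------+--------------------+
	 *   | payload          | offsets array      | sg buffer space    |
	 *   | (data_size)      | (offsets_size of   | (extra_buffers_-   |
	 *   |                  |  binder_size_t[])  |  size)             |
	 *   +------------------+--------------------+--------------------+
	 *                      ^ off_start          ^ sg_bufp
	 */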
Arve Hjønnevågda498892014-02-21 14:40:26 -08002668 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2669 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302670 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2671 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002672 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002673 return_error_param = -EFAULT;
2674 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002675 goto err_copy_data_failed;
2676 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002677 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2678 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302679 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2680 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002681 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002682 return_error_param = -EFAULT;
2683 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002684 goto err_copy_data_failed;
2685 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002686 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2687 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2688 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002689 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002690 return_error_param = -EINVAL;
2691 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002692 goto err_bad_offset;
2693 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02002694 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2695 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2696 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05302697 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002698 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002699 return_error_param = -EINVAL;
2700 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002701 goto err_bad_offset;
2702 }
2703 off_end = (void *)off_start + tr->offsets_size;
2704 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2705 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002706 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002707 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002708 struct binder_object_header *hdr;
2709 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002710
Martijn Coenen00c80372016-07-13 12:06:49 +02002711 if (object_size == 0 || *offp < off_min) {
2712 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002713 proc->pid, thread->pid, (u64)*offp,
2714 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02002715 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002716 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002717 return_error_param = -EINVAL;
2718 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002719 goto err_bad_offset;
2720 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002721
2722 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2723 off_min = *offp + object_size;
2724 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002725 case BINDER_TYPE_BINDER:
2726 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002727 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002728
Martijn Coenen00c80372016-07-13 12:06:49 +02002729 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002730 ret = binder_translate_binder(fp, t, thread);
2731 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02002732 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002733 return_error_param = ret;
2734 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002735 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002736 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002737 } break;
2738 case BINDER_TYPE_HANDLE:
2739 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002740 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002741
Martijn Coenen00c80372016-07-13 12:06:49 +02002742 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002743 ret = binder_translate_handle(fp, t, thread);
2744 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002745 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002746 return_error_param = ret;
2747 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002748 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002749 }
2750 } break;
2751
2752 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002753 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002754 int target_fd = binder_translate_fd(fp->fd, t, thread,
2755 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002756
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002757 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002758 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002759 return_error_param = target_fd;
2760 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002761 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002762 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002763 fp->pad_binder = 0;
2764 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002765 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002766 case BINDER_TYPE_FDA: {
2767 struct binder_fd_array_object *fda =
2768 to_binder_fd_array_object(hdr);
2769 struct binder_buffer_object *parent =
2770 binder_validate_ptr(t->buffer, fda->parent,
2771 off_start,
2772 offp - off_start);
2773 if (!parent) {
2774 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2775 proc->pid, thread->pid);
2776 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002777 return_error_param = -EINVAL;
2778 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002779 goto err_bad_parent;
2780 }
2781 if (!binder_validate_fixup(t->buffer, off_start,
2782 parent, fda->parent_offset,
2783 last_fixup_obj,
2784 last_fixup_min_off)) {
2785 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2786 proc->pid, thread->pid);
2787 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002788 return_error_param = -EINVAL;
2789 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002790 goto err_bad_parent;
2791 }
2792 ret = binder_translate_fd_array(fda, parent, t, thread,
2793 in_reply_to);
2794 if (ret < 0) {
2795 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002796 return_error_param = ret;
2797 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002798 goto err_translate_failed;
2799 }
2800 last_fixup_obj = parent;
2801 last_fixup_min_off =
2802 fda->parent_offset + sizeof(u32) * fda->num_fds;
2803 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002804 case BINDER_TYPE_PTR: {
2805 struct binder_buffer_object *bp =
2806 to_binder_buffer_object(hdr);
2807 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002808
Martijn Coenen5a6da532016-09-30 14:10:07 +02002809 if (bp->length > buf_left) {
2810 binder_user_error("%d:%d got transaction with too large buffer\n",
2811 proc->pid, thread->pid);
2812 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002813 return_error_param = -EINVAL;
2814 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002815 goto err_bad_offset;
2816 }
2817 if (copy_from_user(sg_bufp,
2818 (const void __user *)(uintptr_t)
2819 bp->buffer, bp->length)) {
2820 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2821 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07002822 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002823 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002824 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002825 goto err_copy_data_failed;
2826 }
2827 /* Fixup buffer pointer to target proc address space */
2828 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07002829 binder_alloc_get_user_buffer_offset(
2830 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002831 sg_bufp += ALIGN(bp->length, sizeof(u64));
2832
2833 ret = binder_fixup_parent(t, thread, bp, off_start,
2834 offp - off_start,
2835 last_fixup_obj,
2836 last_fixup_min_off);
2837 if (ret < 0) {
2838 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002839 return_error_param = ret;
2840 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002841 goto err_translate_failed;
2842 }
2843 last_fixup_obj = bp;
2844 last_fixup_min_off = 0;
2845 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002846 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002847 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002848 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002849 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002850 return_error_param = -EINVAL;
2851 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002852 goto err_bad_object_type;
2853 }
2854 }
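	/*
	 * Editor's note: at this point every object in the payload has
	 * been rewritten for the target process: BINDER_TYPE_BINDER
	 * objects became handles (and handles became binders where they
	 * referred back to the target), fds were dup'ed into the target,
	 * and PTR buffers were rebased, while off_min forced the offsets
	 * array to be sorted with non-overlapping objects.
	 */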
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002855 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002856 binder_enqueue_work(proc, tcomplete, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002857 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002858
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002859 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002860 binder_inner_proc_lock(target_proc);
2861 if (target_thread->is_dead) {
2862 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002863 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002864 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002865 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002866 binder_pop_transaction_ilocked(target_thread, in_reply_to);
2867 binder_enqueue_work_ilocked(&t->work, target_list);
2868 binder_inner_proc_unlock(target_proc);
Todd Kjos21ef40a2017-03-30 18:02:13 -07002869 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002870 } else if (!(t->flags & TF_ONE_WAY)) {
2871 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002872 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002873 t->need_reply = 1;
2874 t->from_parent = thread->transaction_stack;
2875 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002876 binder_inner_proc_unlock(proc);
2877 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002878 if (target_proc->is_dead ||
2879 (target_thread && target_thread->is_dead)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002880 binder_inner_proc_unlock(target_proc);
2881 binder_inner_proc_lock(proc);
2882 binder_pop_transaction_ilocked(thread, t);
2883 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002884 goto err_dead_proc_or_thread;
2885 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002886 binder_enqueue_work_ilocked(&t->work, target_list);
2887 binder_inner_proc_unlock(target_proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002888 } else {
2889 BUG_ON(target_node == NULL);
2890 BUG_ON(t->buffer->async_transaction != 1);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002891 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002892 if (target_node->has_async_transaction) {
2893 target_list = &target_node->async_todo;
2894 target_wait = NULL;
2895 } else
2896 target_node->has_async_transaction = 1;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002897 /*
2898 * Test/set of has_async_transaction
2899 * must be atomic with enqueue on
2900 * async_todo
2901 */
Martijn Coenen995a36e2017-06-02 13:36:52 -07002902 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002903 if (target_proc->is_dead ||
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002904 (target_thread && target_thread->is_dead)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002905 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002906 binder_node_unlock(target_node);
Todd Kjos2f993e22017-05-12 14:42:55 -07002907 goto err_dead_proc_or_thread;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002908 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002909 binder_enqueue_work_ilocked(&t->work, target_list);
2910 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002911 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002912 }
Riley Andrewsb5968812015-09-01 12:42:07 -07002913 if (target_wait) {
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002914 if (reply || !(tr->flags & TF_ONE_WAY))
Riley Andrewsb5968812015-09-01 12:42:07 -07002915 wake_up_interruptible_sync(target_wait);
2916 else
2917 wake_up_interruptible(target_wait);
2918 }
Todd Kjos2f993e22017-05-12 14:42:55 -07002919 if (target_thread)
2920 binder_thread_dec_tmpref(target_thread);
2921 binder_proc_dec_tmpref(target_proc);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002922 /*
2923 * write barrier to synchronize with initialization
2924 * of log entry
2925 */
2926 smp_wmb();
2927 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002928 return;
2929
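/*
 * Editor's note: the labels below form one fall-through unwind ladder,
 * ordered from the latest failure point to the earliest; jumping in
 * further down skips exactly the cleanup steps for state that was
 * never set up.
 */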
Todd Kjos2f993e22017-05-12 14:42:55 -07002930err_dead_proc_or_thread:
2931 return_error = BR_DEAD_REPLY;
2932 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002933err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002934err_bad_object_type:
2935err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002936err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002937err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002938 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002939 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjosc37162d2017-05-26 11:56:29 -07002940 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002941 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07002942 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002943err_binder_alloc_buf_failed:
2944 kfree(tcomplete);
2945 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2946err_alloc_tcomplete_failed:
2947 kfree(t);
2948 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2949err_alloc_t_failed:
2950err_bad_call_stack:
2951err_empty_call_stack:
2952err_dead_binder:
2953err_invalid_target_handle:
2954err_no_context_mgr_node:
Todd Kjos2f993e22017-05-12 14:42:55 -07002955 if (target_thread)
2956 binder_thread_dec_tmpref(target_thread);
2957 if (target_proc)
2958 binder_proc_dec_tmpref(target_proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002959 if (target_node)
2960 binder_dec_node(target_node, 1, 0);
2961
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002962 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07002963 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2964 proc->pid, thread->pid, return_error, return_error_param,
2965 (u64)tr->data_size, (u64)tr->offsets_size,
2966 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002967
2968 {
2969 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09002970
Todd Kjose598d172017-03-22 17:19:52 -07002971 e->return_error = return_error;
2972 e->return_error_param = return_error_param;
2973 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002974 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2975 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002976 /*
2977 * write barrier to synchronize with initialization
2978 * of log entry
2979 */
2980 smp_wmb();
2981 WRITE_ONCE(e->debug_id_done, t_debug_id);
2982 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002983 }
2984
Todd Kjos858b8da2017-04-21 17:35:12 -07002985 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002986 if (in_reply_to) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002987 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002988 binder_enqueue_work(thread->proc,
2989 &thread->return_error.work,
2990 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002991 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07002992 } else {
2993 thread->return_error.cmd = return_error;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002994 binder_enqueue_work(thread->proc,
2995 &thread->return_error.work,
2996 &thread->todo);
Todd Kjos858b8da2017-04-21 17:35:12 -07002997 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002998}
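
/*
 * Editor's illustration -- not part of binder.c.  A minimal, hedged
 * sketch of the user-space side that feeds binder_transaction() above,
 * using only the UAPI from <linux/android/binder.h>.  The descriptor
 * "bfd" (a hypothetical open("/dev/binder") fd), the handle, code and
 * payload are all example values, not anything defined by the driver.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int send_sync_transaction(int bfd, uint32_t handle, uint32_t code,
				 void *payload, size_t payload_size)
{
	struct {
		uint32_t cmd;
		struct binder_transaction_data txn;
	} __attribute__((packed)) wr;	/* packed: the driver reads cmd,
					 * then the tr struct, back to back */
	struct binder_write_read bwr;

	memset(&wr, 0, sizeof(wr));
	wr.cmd = BC_TRANSACTION;	/* dispatched in binder_thread_write() */
	wr.txn.target.handle = handle;
	wr.txn.code = code;
	wr.txn.flags = 0;		/* synchronous: a BR_REPLY is expected */
	wr.txn.data_size = payload_size;
	wr.txn.offsets_size = 0;	/* no flat_binder_objects embedded;
					 * otherwise: byte size of the
					 * binder_size_t offsets array */
	wr.txn.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)payload;
	wr.txn.data.ptr.offsets = 0;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(wr);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&wr;

	/* a non-zero read_size here would also drain BR_TRANSACTION_COMPLETE
	 * and, later, the BR_REPLY carrying the answer */
	return ioctl(bfd, BINDER_WRITE_READ, &bwr);
}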
2999
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003000static int binder_thread_write(struct binder_proc *proc,
3001 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003002 binder_uintptr_t binder_buffer, size_t size,
3003 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003004{
3005 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003006 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003007 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003008 void __user *ptr = buffer + *consumed;
3009 void __user *end = buffer + size;
3010
Todd Kjos858b8da2017-04-21 17:35:12 -07003011 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003012 int ret;
3013
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003014 if (get_user(cmd, (uint32_t __user *)ptr))
3015 return -EFAULT;
3016 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003017 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003018 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003019 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3020 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3021 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003022 }
3023 switch (cmd) {
3024 case BC_INCREFS:
3025 case BC_ACQUIRE:
3026 case BC_RELEASE:
3027 case BC_DECREFS: {
3028 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003029 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003030 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3031 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3032 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003033
3034 if (get_user(target, (uint32_t __user *)ptr))
3035 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003036
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003037 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003038 ret = -1;
3039 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003040 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003041 mutex_lock(&context->context_mgr_node_lock);
3042 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003043 if (ctx_mgr_node)
3044 ret = binder_inc_ref_for_node(
3045 proc, ctx_mgr_node,
3046 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003047 mutex_unlock(&context->context_mgr_node_lock);
3048 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003049 if (ret)
3050 ret = binder_update_ref_for_handle(
3051 proc, target, increment, strong,
3052 &rdata);
3053 if (!ret && rdata.desc != target) {
3054 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3055 proc->pid, thread->pid,
3056 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003057 }
3058 switch (cmd) {
3059 case BC_INCREFS:
3060 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003061 break;
3062 case BC_ACQUIRE:
3063 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003064 break;
3065 case BC_RELEASE:
3066 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003067 break;
3068 case BC_DECREFS:
3069 default:
3070 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003071 break;
3072 }
3073 if (ret) {
3074 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3075 proc->pid, thread->pid, debug_string,
3076 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003077 break;
3078 }
3079 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003080 "%d:%d %s ref %d desc %d s %d w %d\n",
3081 proc->pid, thread->pid, debug_string,
3082 rdata.debug_id, rdata.desc, rdata.strong,
3083 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003084 break;
3085 }
3086 case BC_INCREFS_DONE:
3087 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003088 binder_uintptr_t node_ptr;
3089 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003090 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003091 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003092
Arve Hjønnevågda498892014-02-21 14:40:26 -08003093 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003094 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003095 ptr += sizeof(binder_uintptr_t);
3096 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003097 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003098 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003099 node = binder_get_node(proc, node_ptr);
3100 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003101 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003102 proc->pid, thread->pid,
3103 cmd == BC_INCREFS_DONE ?
3104 "BC_INCREFS_DONE" :
3105 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003106 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003107 break;
3108 }
3109 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003110 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003111 proc->pid, thread->pid,
3112 cmd == BC_INCREFS_DONE ?
3113 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003114 (u64)node_ptr, node->debug_id,
3115 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003116 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003117 break;
3118 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003119 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003120 if (cmd == BC_ACQUIRE_DONE) {
3121 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303122 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003123 proc->pid, thread->pid,
3124 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003125 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003126 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003127 break;
3128 }
3129 node->pending_strong_ref = 0;
3130 } else {
3131 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303132 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003133 proc->pid, thread->pid,
3134 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003135 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003136 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003137 break;
3138 }
3139 node->pending_weak_ref = 0;
3140 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003141 free_node = binder_dec_node_nilocked(node,
3142 cmd == BC_ACQUIRE_DONE, 0);
3143 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003144 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003145 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003146 proc->pid, thread->pid,
3147 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003148 node->debug_id, node->local_strong_refs,
3149 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003150 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003151 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003152 break;
3153 }
3154 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303155 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003156 return -EINVAL;
3157 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303158 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003159 return -EINVAL;
3160
3161 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003162 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003163 struct binder_buffer *buffer;
3164
Arve Hjønnevågda498892014-02-21 14:40:26 -08003165 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003166 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003167 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003168
Todd Kjos076072a2017-04-21 14:32:11 -07003169 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3170 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003171 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003172 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3173 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003174 break;
3175 }
3176 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003177 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3178 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003179 break;
3180 }
3181 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003182 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3183 proc->pid, thread->pid, (u64)data_ptr,
3184 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003185 buffer->transaction ? "active" : "finished");
3186
3187 if (buffer->transaction) {
3188 buffer->transaction->buffer = NULL;
3189 buffer->transaction = NULL;
3190 }
3191 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003192 struct binder_node *buf_node;
3193 struct binder_work *w;
3194
3195 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003196 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003197 BUG_ON(!buf_node->has_async_transaction);
3198 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003199 w = binder_dequeue_work_head_ilocked(
3200 &buf_node->async_todo);
3201 if (!w)
3202 buf_node->has_async_transaction = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003203 else
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003204 binder_enqueue_work_ilocked(
3205 w, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003206 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003207 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003208 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003209 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003210 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003211 break;
3212 }
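		/*
		 * Editor's note (hedged user-space sketch): every buffer
		 * delivered to a reader via BR_TRANSACTION / BR_REPLY must
		 * eventually come back through this command, e.g.
		 *
		 *	struct { uint32_t cmd; binder_uintptr_t buf; }
		 *		__attribute__((packed)) w =
		 *		{ BC_FREE_BUFFER, tr.data.ptr.buffer };
		 *
		 * queued through the BINDER_WRITE_READ write buffer;
		 * leaked buffers exhaust the process's mapped
		 * transaction space.
		 */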
3213
Martijn Coenen5a6da532016-09-30 14:10:07 +02003214 case BC_TRANSACTION_SG:
3215 case BC_REPLY_SG: {
3216 struct binder_transaction_data_sg tr;
3217
3218 if (copy_from_user(&tr, ptr, sizeof(tr)))
3219 return -EFAULT;
3220 ptr += sizeof(tr);
3221 binder_transaction(proc, thread, &tr.transaction_data,
3222 cmd == BC_REPLY_SG, tr.buffers_size);
3223 break;
3224 }
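		/*
		 * Editor's note: the _SG ("scatter-gather") variants only
		 * add buffers_size, which binder_transaction() receives as
		 * extra_buffers_size to carve out room for BINDER_TYPE_PTR
		 * payloads; the embedded transaction_data is unchanged.
		 */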
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003225 case BC_TRANSACTION:
3226 case BC_REPLY: {
3227 struct binder_transaction_data tr;
3228
3229 if (copy_from_user(&tr, ptr, sizeof(tr)))
3230 return -EFAULT;
3231 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003232 binder_transaction(proc, thread, &tr,
3233 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003234 break;
3235 }
3236
3237 case BC_REGISTER_LOOPER:
3238 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303239 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003240 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003241 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003242 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3243 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303244 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003245 proc->pid, thread->pid);
3246 } else if (proc->requested_threads == 0) {
3247 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303248 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003249 proc->pid, thread->pid);
3250 } else {
3251 proc->requested_threads--;
3252 proc->requested_threads_started++;
3253 }
3254 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003255 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003256 break;
3257 case BC_ENTER_LOOPER:
3258 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303259 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003260 proc->pid, thread->pid);
3261 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3262 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303263 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003264 proc->pid, thread->pid);
3265 }
3266 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3267 break;
3268 case BC_EXIT_LOOPER:
3269 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303270 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003271 proc->pid, thread->pid);
3272 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3273 break;
3274
3275 case BC_REQUEST_DEATH_NOTIFICATION:
3276 case BC_CLEAR_DEATH_NOTIFICATION: {
3277 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003278 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003279 struct binder_ref *ref;
3280 struct binder_ref_death *death;
3281
3282 if (get_user(target, (uint32_t __user *)ptr))
3283 return -EFAULT;
3284 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003285 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003286 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003287 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003288 ref = binder_get_ref(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003289 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303290 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003291 proc->pid, thread->pid,
3292 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3293 "BC_REQUEST_DEATH_NOTIFICATION" :
3294 "BC_CLEAR_DEATH_NOTIFICATION",
3295 target);
3296 break;
3297 }
3298
3299 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003300 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003301 proc->pid, thread->pid,
3302 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3303 "BC_REQUEST_DEATH_NOTIFICATION" :
3304 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003305 (u64)cookie, ref->data.debug_id,
3306 ref->data.desc, ref->data.strong,
3307 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003308
3309 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3310 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303311 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003312 proc->pid, thread->pid);
3313 break;
3314 }
3315 death = kzalloc(sizeof(*death), GFP_KERNEL);
3316 if (death == NULL) {
Todd Kjos858b8da2017-04-21 17:35:12 -07003317 WARN_ON(thread->return_error.cmd !=
3318 BR_OK);
3319 thread->return_error.cmd = BR_ERROR;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003320 binder_enqueue_work(
3321 thread->proc,
3322 &thread->return_error.work,
3323 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003324 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303325 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003326 proc->pid, thread->pid);
3327 break;
3328 }
3329 binder_stats_created(BINDER_STAT_DEATH);
3330 INIT_LIST_HEAD(&death->work.entry);
3331 death->cookie = cookie;
3332 ref->death = death;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003333 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003334 if (ref->node->proc == NULL) {
3335 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003336 if (thread->looper &
3337 (BINDER_LOOPER_STATE_REGISTERED |
3338 BINDER_LOOPER_STATE_ENTERED))
3339 binder_enqueue_work(
3340 proc,
3341 &ref->death->work,
3342 &thread->todo);
3343 else {
3344 binder_enqueue_work(
3345 proc,
3346 &ref->death->work,
3347 &proc->todo);
3348 wake_up_interruptible(
3349 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003350 }
3351 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003352 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003353 } else {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003354 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003355 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303356 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003357 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003358 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003359 break;
3360 }
3361 death = ref->death;
3362 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003363 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003364 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003365 (u64)death->cookie,
3366 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003367 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003368 break;
3369 }
3370 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003371 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003372 if (list_empty(&death->work.entry)) {
3373 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003374 if (thread->looper &
3375 (BINDER_LOOPER_STATE_REGISTERED |
3376 BINDER_LOOPER_STATE_ENTERED))
3377 binder_enqueue_work_ilocked(
3378 &death->work,
3379 &thread->todo);
3380 else {
3381 binder_enqueue_work_ilocked(
3382 &death->work,
3383 &proc->todo);
3384 wake_up_interruptible(
3385 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003386 }
3387 } else {
3388 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3389 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3390 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003391 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003392 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003393 }
3394 } break;
3395 case BC_DEAD_BINDER_DONE: {
3396 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003397 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003398 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003399
Arve Hjønnevågda498892014-02-21 14:40:26 -08003400 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003401 return -EFAULT;
3402
Lisa Du7a64cd82016-02-17 09:32:52 +08003403 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003404 binder_inner_proc_lock(proc);
3405 list_for_each_entry(w, &proc->delivered_death,
3406 entry) {
3407 struct binder_ref_death *tmp_death =
3408 container_of(w,
3409 struct binder_ref_death,
3410 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003411
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003412 if (tmp_death->cookie == cookie) {
3413 death = tmp_death;
3414 break;
3415 }
3416 }
3417 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003418 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3419 proc->pid, thread->pid, (u64)cookie,
3420 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003421 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003422 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3423 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003424 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003425 break;
3426 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003427 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003428 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3429 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003430 if (thread->looper &
3431 (BINDER_LOOPER_STATE_REGISTERED |
3432 BINDER_LOOPER_STATE_ENTERED))
3433 binder_enqueue_work_ilocked(
3434 &death->work, &thread->todo);
3435 else {
3436 binder_enqueue_work_ilocked(
3437 &death->work,
3438 &proc->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003439 wake_up_interruptible(&proc->wait);
3440 }
3441 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003442 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003443 } break;
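		/*
		 * Editor's note, a hedged sketch of the full death-
		 * notification handshake (cookie is an opaque user-space
		 * value):
		 *
		 *   BC_REQUEST_DEATH_NOTIFICATION(handle, cookie)
		 *	... remote process dies ...
		 *   <- BR_DEAD_BINDER(cookie)
		 *   BC_DEAD_BINDER_DONE(cookie)	(ack, see above)
		 *   BC_CLEAR_DEATH_NOTIFICATION(handle, cookie)
		 *   <- BR_CLEAR_DEATH_NOTIFICATION_DONE(cookie)
		 */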
3444
3445 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303446 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003447 proc->pid, thread->pid, cmd);
3448 return -EINVAL;
3449 }
3450 *consumed = ptr - buffer;
3451 }
3452 return 0;
3453}
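
/*
 * Editor's illustration -- not part of binder.c.  A hedged sketch of a
 * user-space looper thread driving the write/read pair above and below:
 * it announces itself with BC_ENTER_LOOPER, then blocks on the read
 * side of BINDER_WRITE_READ until binder_thread_read() hands it work.
 * "bfd" is a hypothetical open("/dev/binder") descriptor; BR_* stream
 * parsing and error handling are elided.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void looper_thread(int bfd)
{
	uint32_t enter = BC_ENTER_LOOPER;
	uint32_t rbuf[128];
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(enter);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&enter;
	(void)ioctl(bfd, BINDER_WRITE_READ, &bwr);	/* sets looper state */

	for (;;) {
		memset(&bwr, 0, sizeof(bwr));
		bwr.read_size = sizeof(rbuf);
		bwr.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf;
		if (ioctl(bfd, BINDER_WRITE_READ, &bwr) < 0)
			break;
		/*
		 * rbuf now holds bwr.read_consumed bytes of BR_* stream:
		 * typically BR_NOOP first, then e.g. BR_TRANSACTION
		 * followed by a binder_transaction_data.  Each delivered
		 * buffer must later be returned with BC_FREE_BUFFER.
		 */
	}
}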
3454
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003455static void binder_stat_br(struct binder_proc *proc,
3456 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003457{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003458 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003459 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003460 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3461 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3462 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003463 }
3464}
3465
3466static int binder_has_proc_work(struct binder_proc *proc,
3467 struct binder_thread *thread)
3468{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003469 return !binder_worklist_empty(proc, &proc->todo) ||
3470 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003471}
3472
3473static int binder_has_thread_work(struct binder_thread *thread)
3474{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003475 return !binder_worklist_empty(thread->proc, &thread->todo) ||
3476 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003477}
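
/*
 * Editor's note: these two helpers back the wait_event_freezable*()
 * calls in binder_thread_read() below; looper_need_return is the
 * driver's way of forcing the condition true (for instance during
 * flush) so the thread returns to user space instead of sleeping.
 */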
3478
Todd Kjos60792612017-05-24 10:51:01 -07003479static int binder_put_node_cmd(struct binder_proc *proc,
3480 struct binder_thread *thread,
3481 void __user **ptrp,
3482 binder_uintptr_t node_ptr,
3483 binder_uintptr_t node_cookie,
3484 int node_debug_id,
3485 uint32_t cmd, const char *cmd_name)
3486{
3487 void __user *ptr = *ptrp;
3488
3489 if (put_user(cmd, (uint32_t __user *)ptr))
3490 return -EFAULT;
3491 ptr += sizeof(uint32_t);
3492
3493 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3494 return -EFAULT;
3495 ptr += sizeof(binder_uintptr_t);
3496
3497 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3498 return -EFAULT;
3499 ptr += sizeof(binder_uintptr_t);
3500
3501 binder_stat_br(proc, thread, cmd);
3502 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3503 proc->pid, thread->pid, cmd_name, node_debug_id,
3504 (u64)node_ptr, (u64)node_cookie);
3505
3506 *ptrp = ptr;
3507 return 0;
3508}
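
/*
 * Editor's illustration -- not part of binder.c.  What the stream
 * written by binder_put_node_cmd() requires of user space: BR_INCREFS
 * and BR_ACQUIRE must be acknowledged (the node keeps pending_weak_ref
 * or pending_strong_ref set until the matching BC_INCREFS_DONE /
 * BC_ACQUIRE_DONE arrives, see binder_thread_write() above), while
 * BR_RELEASE and BR_DECREFS need no reply.  A hedged sketch; "bfd" is
 * a hypothetical binder descriptor, ptr/cookie are the two values read
 * right after the BR_ command word.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int ack_node_cmd(int bfd, uint32_t br_cmd,
			binder_uintptr_t ptr, binder_uintptr_t cookie)
{
	struct {
		uint32_t cmd;
		binder_uintptr_t ptr;
		binder_uintptr_t cookie;
	} __attribute__((packed)) ack;
	struct binder_write_read bwr;

	ack.cmd = (br_cmd == BR_ACQUIRE) ? BC_ACQUIRE_DONE
					 : BC_INCREFS_DONE;
	ack.ptr = ptr;		/* echo back exactly what was read */
	ack.cookie = cookie;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(ack);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&ack;
	return ioctl(bfd, BINDER_WRITE_READ, &bwr);
}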
3509
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003510static int binder_thread_read(struct binder_proc *proc,
3511 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003512 binder_uintptr_t binder_buffer, size_t size,
3513 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003514{
Arve Hjønnevågda498892014-02-21 14:40:26 -08003515 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003516 void __user *ptr = buffer + *consumed;
3517 void __user *end = buffer + size;
3518
3519 int ret = 0;
3520 int wait_for_proc_work;
3521
3522 if (*consumed == 0) {
3523 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3524 return -EFAULT;
3525 ptr += sizeof(uint32_t);
3526 }
3527
3528retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07003529 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003530 wait_for_proc_work = thread->transaction_stack == NULL &&
Martijn Coenen995a36e2017-06-02 13:36:52 -07003531 binder_worklist_empty_ilocked(&thread->todo);
Todd Kjosd600e902017-05-25 17:35:02 -07003532 if (wait_for_proc_work)
3533 proc->ready_threads++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003534 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003535
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003536 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003537
3538 binder_unlock(__func__);
3539
3540 trace_binder_wait_for_work(wait_for_proc_work,
3541 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003542 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003543 if (wait_for_proc_work) {
3544 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3545 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303546 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003547 proc->pid, thread->pid, thread->looper);
3548 wait_event_interruptible(binder_user_error_wait,
3549 binder_stop_on_user_error < 2);
3550 }
3551 binder_set_nice(proc->default_priority);
3552 if (non_block) {
3553 if (!binder_has_proc_work(proc, thread))
3554 ret = -EAGAIN;
3555 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003556 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003557 } else {
3558 if (non_block) {
3559 if (!binder_has_thread_work(thread))
3560 ret = -EAGAIN;
3561 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003562 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003563 }
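	/*
	 * Editor's note: the proc-wide wait is _exclusive so that one
	 * wake_up on proc->wait rouses a single idle thread rather than
	 * the whole pool; a thread that already has private work waits
	 * on its own thread->wait and is woken directly.
	 */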
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003564
3565 binder_lock(__func__);
3566
Todd Kjosd600e902017-05-25 17:35:02 -07003567 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003568 if (wait_for_proc_work)
3569 proc->ready_threads--;
Todd Kjosd600e902017-05-25 17:35:02 -07003570 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003571 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3572
3573 if (ret)
3574 return ret;
3575
3576 while (1) {
3577 uint32_t cmd;
3578 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003579 struct binder_work *w = NULL;
3580 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003581 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07003582 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003583
Todd Kjose7f23ed2017-03-21 13:06:01 -07003584 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003585 if (!binder_worklist_empty_ilocked(&thread->todo))
3586 list = &thread->todo;
3587 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3588 wait_for_proc_work)
3589 list = &proc->todo;
3590 else {
3591 binder_inner_proc_unlock(proc);
3592
Dmitry Voytik395262a2014-09-08 18:16:34 +04003593 /* no data added */
Todd Kjos6798e6d2017-01-06 14:19:25 -08003594 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003595 goto retry;
3596 break;
3597 }
3598
Todd Kjose7f23ed2017-03-21 13:06:01 -07003599 if (end - ptr < sizeof(tr) + 4) {
3600 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003601 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003602 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003603 w = binder_dequeue_work_head_ilocked(list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003604
3605 switch (w->type) {
3606 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003607 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003608 t = container_of(w, struct binder_transaction, work);
3609 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003610 case BINDER_WORK_RETURN_ERROR: {
3611 struct binder_error *e = container_of(
3612 w, struct binder_error, work);
3613
3614 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003615 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07003616 if (put_user(e->cmd, (uint32_t __user *)ptr))
3617 return -EFAULT;
			/* capture the command before it is reset for reuse,
			 * so binder_stat_br() below logs the right code */
			cmd = e->cmd;
			e->cmd = BR_OK;
3619 ptr += sizeof(uint32_t);
3620
3621 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07003622 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003623 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003624 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003625 cmd = BR_TRANSACTION_COMPLETE;
3626 if (put_user(cmd, (uint32_t __user *)ptr))
3627 return -EFAULT;
3628 ptr += sizeof(uint32_t);
3629
3630 binder_stat_br(proc, thread, cmd);
3631 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303632 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003633 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003634 kfree(w);
3635 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3636 } break;
3637 case BINDER_WORK_NODE: {
3638 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07003639 int strong, weak;
3640 binder_uintptr_t node_ptr = node->ptr;
3641 binder_uintptr_t node_cookie = node->cookie;
3642 int node_debug_id = node->debug_id;
3643 int has_weak_ref;
3644 int has_strong_ref;
3645 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09003646
Todd Kjos60792612017-05-24 10:51:01 -07003647 BUG_ON(proc != node->proc);
3648 strong = node->internal_strong_refs ||
3649 node->local_strong_refs;
3650 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07003651 node->local_weak_refs ||
3652 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07003653 has_strong_ref = node->has_strong_ref;
3654 has_weak_ref = node->has_weak_ref;
3655
3656 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003657 node->has_weak_ref = 1;
3658 node->pending_weak_ref = 1;
3659 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003660 }
3661 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003662 node->has_strong_ref = 1;
3663 node->pending_strong_ref = 1;
3664 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003665 }
3666 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003667 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003668 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003669 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003670 if (!weak && !strong) {
3671 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3672 "%d:%d node %d u%016llx c%016llx deleted\n",
3673 proc->pid, thread->pid,
3674 node_debug_id,
3675 (u64)node_ptr,
3676 (u64)node_cookie);
3677 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003678 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003679 binder_node_lock(node);
3680 /*
3681 * Acquire the node lock before freeing the
3682 * node to serialize with other threads that
3683 * may have been holding the node lock while
3684 * decrementing this node (avoids race where
3685 * this thread frees while the other thread
3686 * is unlocking the node after the final
3687 * decrement)
3688 */
3689 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003690 binder_free_node(node);
3691 } else
3692 binder_inner_proc_unlock(proc);
3693
Todd Kjos60792612017-05-24 10:51:01 -07003694 if (weak && !has_weak_ref)
3695 ret = binder_put_node_cmd(
3696 proc, thread, &ptr, node_ptr,
3697 node_cookie, node_debug_id,
3698 BR_INCREFS, "BR_INCREFS");
3699 if (!ret && strong && !has_strong_ref)
3700 ret = binder_put_node_cmd(
3701 proc, thread, &ptr, node_ptr,
3702 node_cookie, node_debug_id,
3703 BR_ACQUIRE, "BR_ACQUIRE");
3704 if (!ret && !strong && has_strong_ref)
3705 ret = binder_put_node_cmd(
3706 proc, thread, &ptr, node_ptr,
3707 node_cookie, node_debug_id,
3708 BR_RELEASE, "BR_RELEASE");
3709 if (!ret && !weak && has_weak_ref)
3710 ret = binder_put_node_cmd(
3711 proc, thread, &ptr, node_ptr,
3712 node_cookie, node_debug_id,
3713 BR_DECREFS, "BR_DECREFS");
3714 if (orig_ptr == ptr)
3715 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3716 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3717 proc->pid, thread->pid,
3718 node_debug_id,
3719 (u64)node_ptr,
3720 (u64)node_cookie);
3721 if (ret)
3722 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003723 } break;
3724 case BINDER_WORK_DEAD_BINDER:
3725 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3726 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3727 struct binder_ref_death *death;
3728 uint32_t cmd;
3729
3730 death = container_of(w, struct binder_ref_death, work);
3731 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3732 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3733 else
3734 cmd = BR_DEAD_BINDER;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003735 /*
3736 * TODO: there is a race condition between
3737 * death notification requests and delivery
3738 * of the notifications. This will be handled
3739 * in a later patch.
3740 */
3741 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003742 if (put_user(cmd, (uint32_t __user *)ptr))
3743 return -EFAULT;
3744 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003745 if (put_user(death->cookie,
3746 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003747 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003748 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07003749 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003750 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003751 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003752 proc->pid, thread->pid,
3753 cmd == BR_DEAD_BINDER ?
3754 "BR_DEAD_BINDER" :
3755 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003756 (u64)death->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003757
3758 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003759 kfree(death);
3760 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003761 } else {
3762 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003763 binder_enqueue_work_ilocked(
3764 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003765 binder_inner_proc_unlock(proc);
3766 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003767 if (cmd == BR_DEAD_BINDER)
3768 goto done; /* DEAD_BINDER notifications can cause transactions */
3769 } break;
3770 }
3771
3772 if (!t)
3773 continue;
3774
3775 BUG_ON(t->buffer == NULL);
3776 if (t->buffer->target_node) {
3777 struct binder_node *target_node = t->buffer->target_node;
Seunghun Lee10f62862014-05-01 01:30:23 +09003778
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003779 tr.target.ptr = target_node->ptr;
3780 tr.cookie = target_node->cookie;
3781 t->saved_priority = task_nice(current);
3782 if (t->priority < target_node->min_priority &&
3783 !(t->flags & TF_ONE_WAY))
3784 binder_set_nice(t->priority);
3785 else if (!(t->flags & TF_ONE_WAY) ||
3786 t->saved_priority > target_node->min_priority)
3787 binder_set_nice(target_node->min_priority);
3788 cmd = BR_TRANSACTION;
3789 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003790 tr.target.ptr = 0;
3791 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003792 cmd = BR_REPLY;
3793 }
3794 tr.code = t->code;
3795 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06003796 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003797
Todd Kjos2f993e22017-05-12 14:42:55 -07003798 t_from = binder_get_txn_from(t);
3799 if (t_from) {
3800 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09003801
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003802 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08003803 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003804 } else {
3805 tr.sender_pid = 0;
3806 }
3807
3808 tr.data_size = t->buffer->data_size;
3809 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07003810 tr.data.ptr.buffer = (binder_uintptr_t)
3811 ((uintptr_t)t->buffer->data +
3812 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003813 tr.data.ptr.offsets = tr.data.ptr.buffer +
3814 ALIGN(t->buffer->data_size,
3815 sizeof(void *));
3816
Todd Kjos2f993e22017-05-12 14:42:55 -07003817 if (put_user(cmd, (uint32_t __user *)ptr)) {
3818 if (t_from)
3819 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003820 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07003821 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003822 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07003823 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3824 if (t_from)
3825 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003826 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07003827 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003828 ptr += sizeof(tr);
3829
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003830 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003831 binder_stat_br(proc, thread, cmd);
3832 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003833 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003834 proc->pid, thread->pid,
3835 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3836 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07003837 t->debug_id, t_from ? t_from->proc->pid : 0,
3838 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003839 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003840 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003841
Todd Kjos2f993e22017-05-12 14:42:55 -07003842 if (t_from)
3843 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003844 t->buffer->allow_user_free = 1;
3845 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003846 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003847 t->to_parent = thread->transaction_stack;
3848 t->to_thread = thread;
3849 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003850 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003851 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07003852 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003853 }
3854 break;
3855 }
3856
3857done:
3858
3859 *consumed = ptr - buffer;
Todd Kjosd600e902017-05-25 17:35:02 -07003860 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003861 if (proc->requested_threads + proc->ready_threads == 0 &&
3862 proc->requested_threads_started < proc->max_threads &&
3863 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3864	     BINDER_LOOPER_STATE_ENTERED))
3865	     /* user space fails to spawn a new thread if this check is left out */) {
3866 proc->requested_threads++;
Todd Kjosd600e902017-05-25 17:35:02 -07003867 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003868 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303869 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003870 proc->pid, thread->pid);
3871 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3872 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07003873 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Todd Kjosd600e902017-05-25 17:35:02 -07003874 } else
3875 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003876 return 0;
3877}
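
/*
 * Note on thread-pool growth in binder_thread_read() above: the
 * BR_SPAWN_LOOPER request is only bookkeeping on the driver side
 * (proc->requested_threads). Completing the handshake is up to user
 * space; in the usual libbinder stack (an assumption about the
 * caller, not something this file enforces) the sequence is:
 *
 *	driver:     returns BR_SPAWN_LOOPER from binder_thread_read()
 *	user space: spawns a thread, which issues BC_REGISTER_LOOPER
 *	driver:     binder_thread_write() then moves the count from
 *	            requested_threads to requested_threads_started
 */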
3878
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003879static void binder_release_work(struct binder_proc *proc,
3880 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003881{
3882 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09003883
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003884 while (1) {
3885 w = binder_dequeue_work_head(proc, list);
3886 if (!w)
3887 return;
3888
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003889 switch (w->type) {
3890 case BINDER_WORK_TRANSACTION: {
3891 struct binder_transaction *t;
3892
3893 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003894 if (t->buffer->target_node &&
3895 !(t->flags & TF_ONE_WAY)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003896 binder_send_failed_reply(t, BR_DEAD_REPLY);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003897 } else {
3898 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303899 "undelivered transaction %d\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003900 t->debug_id);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003901 binder_free_transaction(t);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003902 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003903 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003904 case BINDER_WORK_RETURN_ERROR: {
3905 struct binder_error *e = container_of(
3906 w, struct binder_error, work);
3907
3908 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3909 "undelivered TRANSACTION_ERROR: %u\n",
3910 e->cmd);
3911 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003912 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003913 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303914 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003915 kfree(w);
3916 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3917 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003918 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3919 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3920 struct binder_ref_death *death;
3921
3922 death = container_of(w, struct binder_ref_death, work);
3923 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003924 "undelivered death notification, %016llx\n",
3925 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003926 kfree(death);
3927 binder_stats_deleted(BINDER_STAT_DEATH);
3928 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003929 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303930 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003931 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003932 break;
3933 }
3934 }
3935
3936}
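
/*
 * binder_release_work() above drains a work list that can no longer
 * be consumed by a reader: transactions that expected a reply get a
 * BR_DEAD_REPLY pushed back to the sender, everything else is logged
 * and freed. The dequeue-one-at-a-time loop (rather than
 * list_for_each) matters because binder_dequeue_work_head() takes
 * and drops the proc's inner lock for each removal, so the list may
 * be modified concurrently while we drain it.
 */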
3937
Todd Kjosb4827902017-05-25 15:52:17 -07003938static struct binder_thread *binder_get_thread_ilocked(
3939 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003940{
3941 struct binder_thread *thread = NULL;
3942 struct rb_node *parent = NULL;
3943 struct rb_node **p = &proc->threads.rb_node;
3944
3945 while (*p) {
3946 parent = *p;
3947 thread = rb_entry(parent, struct binder_thread, rb_node);
3948
3949 if (current->pid < thread->pid)
3950 p = &(*p)->rb_left;
3951 else if (current->pid > thread->pid)
3952 p = &(*p)->rb_right;
3953 else
Todd Kjosb4827902017-05-25 15:52:17 -07003954 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003955 }
Todd Kjosb4827902017-05-25 15:52:17 -07003956 if (!new_thread)
3957 return NULL;
3958 thread = new_thread;
3959 binder_stats_created(BINDER_STAT_THREAD);
3960 thread->proc = proc;
3961 thread->pid = current->pid;
3962 atomic_set(&thread->tmp_ref, 0);
3963 init_waitqueue_head(&thread->wait);
3964 INIT_LIST_HEAD(&thread->todo);
3965 rb_link_node(&thread->rb_node, parent, p);
3966 rb_insert_color(&thread->rb_node, &proc->threads);
3967 thread->looper_need_return = true;
3968 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
3969 thread->return_error.cmd = BR_OK;
3970 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3971 thread->reply_error.cmd = BR_OK;
3972
3973 return thread;
3974}
3975
3976static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3977{
3978 struct binder_thread *thread;
3979 struct binder_thread *new_thread;
3980
3981 binder_inner_proc_lock(proc);
3982 thread = binder_get_thread_ilocked(proc, NULL);
3983 binder_inner_proc_unlock(proc);
3984 if (!thread) {
3985 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3986 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003987 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07003988 binder_inner_proc_lock(proc);
3989 thread = binder_get_thread_ilocked(proc, new_thread);
3990 binder_inner_proc_unlock(proc);
3991 if (thread != new_thread)
3992 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003993 }
3994 return thread;
3995}
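
/*
 * binder_get_thread() above uses the classic allocate-then-insert
 * pattern: kzalloc(GFP_KERNEL) may sleep, so it cannot be called
 * while holding proc->inner_lock (a spinlock). We therefore look up
 * first under the lock, allocate outside it on a miss, and retry the
 * insert under the lock; if another thread raced us and inserted in
 * the meantime, binder_get_thread_ilocked() returns the existing
 * entry and the speculative allocation is freed.
 */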
3996
Todd Kjos2f993e22017-05-12 14:42:55 -07003997static void binder_free_proc(struct binder_proc *proc)
3998{
3999 BUG_ON(!list_empty(&proc->todo));
4000 BUG_ON(!list_empty(&proc->delivered_death));
4001 binder_alloc_deferred_release(&proc->alloc);
4002 put_task_struct(proc->tsk);
4003 binder_stats_deleted(BINDER_STAT_PROC);
4004 kfree(proc);
4005}
4006
4007static void binder_free_thread(struct binder_thread *thread)
4008{
4009 BUG_ON(!list_empty(&thread->todo));
4010 binder_stats_deleted(BINDER_STAT_THREAD);
4011 binder_proc_dec_tmpref(thread->proc);
4012 kfree(thread);
4013}
4014
4015static int binder_thread_release(struct binder_proc *proc,
4016 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004017{
4018 struct binder_transaction *t;
4019 struct binder_transaction *send_reply = NULL;
4020 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07004021 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004022
Todd Kjosb4827902017-05-25 15:52:17 -07004023 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004024 /*
4025 * take a ref on the proc so it survives
4026 * after we remove this thread from proc->threads.
4027	 * The corresponding decrement happens when the
4028	 * thread is actually freed in binder_free_thread().
4029 */
4030 proc->tmp_ref++;
4031 /*
4032 * take a ref on this thread to ensure it
4033 * survives while we are releasing it
4034 */
4035 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004036 rb_erase(&thread->rb_node, &proc->threads);
4037 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07004038 if (t) {
4039 spin_lock(&t->lock);
4040 if (t->to_thread == thread)
4041 send_reply = t;
4042 }
4043 thread->is_dead = true;
4044
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004045 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07004046 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004047 active_transactions++;
4048 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304049 "release %d:%d transaction %d %s, still active\n",
4050 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004051 t->debug_id,
4052 (t->to_thread == thread) ? "in" : "out");
4053
4054 if (t->to_thread == thread) {
4055 t->to_proc = NULL;
4056 t->to_thread = NULL;
4057 if (t->buffer) {
4058 t->buffer->transaction = NULL;
4059 t->buffer = NULL;
4060 }
4061 t = t->to_parent;
4062 } else if (t->from == thread) {
4063 t->from = NULL;
4064 t = t->from_parent;
4065 } else
4066 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07004067 spin_unlock(&last_t->lock);
4068 if (t)
4069 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004070 }
Todd Kjosb4827902017-05-25 15:52:17 -07004071 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004072
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004073 if (send_reply)
4074 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004075 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004076 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004077 return active_transactions;
4078}
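
/*
 * The transaction-stack walk in binder_thread_release() above takes
 * each transaction's spinlock while its fields are updated and its
 * to_parent/from_parent link is read, then drops it (via last_t)
 * before locking the next entry. Together with the tmp_ref taken on
 * both the proc and the thread, this lets a dying thread unwind a
 * stack that other threads may still be referencing.
 */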
4079
4080static unsigned int binder_poll(struct file *filp,
4081 struct poll_table_struct *wait)
4082{
4083 struct binder_proc *proc = filp->private_data;
4084 struct binder_thread *thread = NULL;
4085 int wait_for_proc_work;
4086
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004087 binder_lock(__func__);
4088
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004089	thread = binder_get_thread(proc);
	if (thread == NULL) {
		/*
		 * binder_get_thread() can fail with ENOMEM; bail out
		 * here rather than dereference a NULL thread below.
		 */
		binder_unlock(__func__);
		return POLLERR;
	}

Martijn Coenen995a36e2017-06-02 13:36:52 -07004091 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004092 wait_for_proc_work = thread->transaction_stack == NULL &&
Martijn Coenen995a36e2017-06-02 13:36:52 -07004093 binder_worklist_empty_ilocked(&thread->todo);
4094 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004095
4096 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004097
4098 if (wait_for_proc_work) {
4099 if (binder_has_proc_work(proc, thread))
4100 return POLLIN;
4101 poll_wait(filp, &proc->wait, wait);
4102 if (binder_has_proc_work(proc, thread))
4103 return POLLIN;
4104 } else {
4105 if (binder_has_thread_work(thread))
4106 return POLLIN;
4107 poll_wait(filp, &thread->wait, wait);
4108 if (binder_has_thread_work(thread))
4109 return POLLIN;
4110 }
4111 return 0;
4112}
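
/*
 * Note: wait_for_proc_work in binder_poll() above mirrors the test
 * at the top of binder_thread_read(): a thread with no transaction
 * stack and an empty todo list is free to service process-wide work,
 * so it polls on proc->wait instead of its private thread->wait.
 */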
4113
Tair Rzayev78260ac2014-06-03 22:27:21 +03004114static int binder_ioctl_write_read(struct file *filp,
4115 unsigned int cmd, unsigned long arg,
4116 struct binder_thread *thread)
4117{
4118 int ret = 0;
4119 struct binder_proc *proc = filp->private_data;
4120 unsigned int size = _IOC_SIZE(cmd);
4121 void __user *ubuf = (void __user *)arg;
4122 struct binder_write_read bwr;
4123
4124 if (size != sizeof(struct binder_write_read)) {
4125 ret = -EINVAL;
4126 goto out;
4127 }
4128 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4129 ret = -EFAULT;
4130 goto out;
4131 }
4132 binder_debug(BINDER_DEBUG_READ_WRITE,
4133 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4134 proc->pid, thread->pid,
4135 (u64)bwr.write_size, (u64)bwr.write_buffer,
4136 (u64)bwr.read_size, (u64)bwr.read_buffer);
4137
4138 if (bwr.write_size > 0) {
4139 ret = binder_thread_write(proc, thread,
4140 bwr.write_buffer,
4141 bwr.write_size,
4142 &bwr.write_consumed);
4143 trace_binder_write_done(ret);
4144 if (ret < 0) {
4145 bwr.read_consumed = 0;
4146 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4147 ret = -EFAULT;
4148 goto out;
4149 }
4150 }
4151 if (bwr.read_size > 0) {
4152 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4153 bwr.read_size,
4154 &bwr.read_consumed,
4155 filp->f_flags & O_NONBLOCK);
4156 trace_binder_read_done(ret);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004157 if (!binder_worklist_empty(proc, &proc->todo))
Tair Rzayev78260ac2014-06-03 22:27:21 +03004158 wake_up_interruptible(&proc->wait);
4159 if (ret < 0) {
4160 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4161 ret = -EFAULT;
4162 goto out;
4163 }
4164 }
4165 binder_debug(BINDER_DEBUG_READ_WRITE,
4166 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4167 proc->pid, thread->pid,
4168 (u64)bwr.write_consumed, (u64)bwr.write_size,
4169 (u64)bwr.read_consumed, (u64)bwr.read_size);
4170 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4171 ret = -EFAULT;
4172 goto out;
4173 }
4174out:
4175 return ret;
4176}
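
/*
 * Usage sketch from the caller's side (hypothetical user-space code,
 * shown only to illustrate the bwr contract; the UAPI names are
 * real, the surrounding code is an example, not mandated here):
 *
 *	struct binder_write_read bwr = {0};
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	uint32_t readbuf[32];
 *
 *	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd;
 *	bwr.write_size = sizeof(cmd);
 *	bwr.read_buffer = (binder_uintptr_t)(uintptr_t)readbuf;
 *	bwr.read_size = sizeof(readbuf);
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 *
 * On return, write_consumed/read_consumed report how much of each
 * buffer was processed; a short read is normal.
 */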
4177
4178static int binder_ioctl_set_ctx_mgr(struct file *filp)
4179{
4180 int ret = 0;
4181 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004182 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004183 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004184 kuid_t curr_euid = current_euid();
4185
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004186 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004187 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004188 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4189 ret = -EBUSY;
4190 goto out;
4191 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004192 ret = security_binder_set_context_mgr(proc->tsk);
4193 if (ret < 0)
4194 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004195 if (uid_valid(context->binder_context_mgr_uid)) {
4196 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004197 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4198 from_kuid(&init_user_ns, curr_euid),
4199 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004200 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004201 ret = -EPERM;
4202 goto out;
4203 }
4204 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004205 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004206 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004207 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004208 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004209 ret = -ENOMEM;
4210 goto out;
4211 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004212 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004213 new_node->local_weak_refs++;
4214 new_node->local_strong_refs++;
4215 new_node->has_strong_ref = 1;
4216 new_node->has_weak_ref = 1;
4217 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004218 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004219 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004220out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004221 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004222 return ret;
4223}
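
/*
 * The context manager registered above is the node that user space
 * reaches through handle 0 (on Android this is servicemanager; that
 * is a property of the user-space stack, not enforced here). Only
 * one process per context may claim the role, hence the -EBUSY
 * check, and the euid pinning keeps a different user from
 * re-claiming it across restarts.
 */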
4224
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004225static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4226{
4227 int ret;
4228 struct binder_proc *proc = filp->private_data;
4229 struct binder_thread *thread;
4230 unsigned int size = _IOC_SIZE(cmd);
4231 void __user *ubuf = (void __user *)arg;
4232
Tair Rzayev78260ac2014-06-03 22:27:21 +03004233 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4234 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004235
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004236 trace_binder_ioctl(cmd, arg);
4237
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004238 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4239 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004240 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004241
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004242 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004243 thread = binder_get_thread(proc);
4244 if (thread == NULL) {
4245 ret = -ENOMEM;
4246 goto err;
4247 }
4248
4249 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004250 case BINDER_WRITE_READ:
4251 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4252 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004253 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004254 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004255 case BINDER_SET_MAX_THREADS: {
4256 int max_threads;
4257
4258 if (copy_from_user(&max_threads, ubuf,
4259 sizeof(max_threads))) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004260 ret = -EINVAL;
4261 goto err;
4262 }
Todd Kjosd600e902017-05-25 17:35:02 -07004263 binder_inner_proc_lock(proc);
4264 proc->max_threads = max_threads;
4265 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004266 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004267 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004268 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004269 ret = binder_ioctl_set_ctx_mgr(filp);
4270 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004271 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004272 break;
4273 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304274 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004275 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004276 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004277 thread = NULL;
4278 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004279 case BINDER_VERSION: {
4280 struct binder_version __user *ver = ubuf;
4281
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004282 if (size != sizeof(struct binder_version)) {
4283 ret = -EINVAL;
4284 goto err;
4285 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004286 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4287 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004288 ret = -EINVAL;
4289 goto err;
4290 }
4291 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004292 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004293 default:
4294 ret = -EINVAL;
4295 goto err;
4296 }
4297 ret = 0;
4298err:
4299 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004300 thread->looper_need_return = false;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004301 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004302 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4303 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304304 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004305err_unlocked:
4306 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004307 return ret;
4308}
4309
4310static void binder_vma_open(struct vm_area_struct *vma)
4311{
4312 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004313
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004314 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304315 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004316 proc->pid, vma->vm_start, vma->vm_end,
4317 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4318 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004319}
4320
4321static void binder_vma_close(struct vm_area_struct *vma)
4322{
4323 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004324
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004325 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304326 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004327 proc->pid, vma->vm_start, vma->vm_end,
4328 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4329 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004330 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004331 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4332}
4333
Vinayak Menonddac7d52014-06-02 18:17:59 +05304334static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4335{
4336 return VM_FAULT_SIGBUS;
4337}
4338
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004339static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004340 .open = binder_vma_open,
4341 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304342 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004343};
4344
Todd Kjosd325d372016-10-10 10:40:53 -07004345static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4346{
4347 int ret;
4348 struct binder_proc *proc = filp->private_data;
4349 const char *failure_string;
4350
4351 if (proc->tsk != current->group_leader)
4352 return -EINVAL;
4353
4354 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4355 vma->vm_end = vma->vm_start + SZ_4M;
4356
4357 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4358 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4359 __func__, proc->pid, vma->vm_start, vma->vm_end,
4360 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4361 (unsigned long)pgprot_val(vma->vm_page_prot));
4362
4363 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4364 ret = -EPERM;
4365 failure_string = "bad vm_flags";
4366 goto err_bad_arg;
4367 }
4368 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4369 vma->vm_ops = &binder_vm_ops;
4370 vma->vm_private_data = proc;
4371
4372 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4373 if (ret)
4374 return ret;
4375 proc->files = get_files_struct(current);
4376 return 0;
4377
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004378err_bad_arg:
Sherwin Soltani258767f2012-06-26 02:00:30 -04004379 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004380 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4381 return ret;
4382}
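
/*
 * mmap contract (what the checks in binder_mmap() above enforce):
 * one mapping per process, read-only from user space
 * (FORBIDDEN_MMAP_FLAGS rejects writable mappings and VM_MAYWRITE is
 * cleared), capped at 4MB, and never inherited across fork()
 * (VM_DONTCOPY). A typical caller (illustrative, not mandated by the
 * driver) does roughly:
 *
 *	fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	base = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * where size is the user-space library's choice, up to the 4MB cap.
 */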
4383
4384static int binder_open(struct inode *nodp, struct file *filp)
4385{
4386 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004387 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004388
4389 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4390 current->group_leader->pid, current->pid);
4391
4392 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4393 if (proc == NULL)
4394 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004395 spin_lock_init(&proc->inner_lock);
4396 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004397 get_task_struct(current->group_leader);
4398 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004399 INIT_LIST_HEAD(&proc->todo);
4400 init_waitqueue_head(&proc->wait);
4401 proc->default_priority = task_nice(current);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004402 binder_dev = container_of(filp->private_data, struct binder_device,
4403 miscdev);
4404 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07004405 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004406
4407 binder_lock(__func__);
4408
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004409 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004410 proc->pid = current->group_leader->pid;
4411 INIT_LIST_HEAD(&proc->delivered_death);
4412 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004413
4414 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004415
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004416 mutex_lock(&binder_procs_lock);
4417 hlist_add_head(&proc->proc_node, &binder_procs);
4418 mutex_unlock(&binder_procs_lock);
4419
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004420 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004421 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09004422
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004423 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004424 /*
4425 * proc debug entries are shared between contexts, so
4426 * this will fail if the process tries to open the driver
4427	 * again with a different context. The printing code will
4428	 * print all contexts that a given PID has anyway, so this
4429	 * is not a problem.
4430 */
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004431 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004432 binder_debugfs_dir_entry_proc,
4433 (void *)(unsigned long)proc->pid,
4434 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004435 }
4436
4437 return 0;
4438}
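
/*
 * One binder_proc per open(): the struct is stashed in
 * filp->private_data and tied to the group leader, which is why
 * binder_mmap() above rejects a caller whose proc->tsk is not its
 * own group leader. The debugfs entry is keyed by pid and shared
 * between contexts, as the comment inside binder_open() notes.
 */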
4439
4440static int binder_flush(struct file *filp, fl_owner_t id)
4441{
4442 struct binder_proc *proc = filp->private_data;
4443
4444 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4445
4446 return 0;
4447}
4448
4449static void binder_deferred_flush(struct binder_proc *proc)
4450{
4451 struct rb_node *n;
4452 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09004453
Todd Kjosb4827902017-05-25 15:52:17 -07004454 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004455 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4456 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09004457
Todd Kjos6798e6d2017-01-06 14:19:25 -08004458 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004459 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4460 wake_up_interruptible(&thread->wait);
4461 wake_count++;
4462 }
4463 }
Todd Kjosb4827902017-05-25 15:52:17 -07004464 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004465 wake_up_interruptible_all(&proc->wait);
4466
4467 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4468 "binder_flush: %d woke %d threads\n", proc->pid,
4469 wake_count);
4470}
4471
4472static int binder_release(struct inode *nodp, struct file *filp)
4473{
4474 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004475
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004476 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004477 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4478
4479 return 0;
4480}
4481
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004482static int binder_node_release(struct binder_node *node, int refs)
4483{
4484 struct binder_ref *ref;
4485 int death = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004486 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004487
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004488 binder_release_work(proc, &node->async_todo);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004489
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004490 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004491 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004492 binder_dequeue_work_ilocked(&node->work);
Todd Kjosf22abc72017-05-09 11:08:05 -07004493 /*
4494	 * The caller must have taken a temporary ref on the node.
4495 */
4496 BUG_ON(!node->tmp_refs);
4497 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004498 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004499 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004500 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004501
4502 return refs;
4503 }
4504
4505 node->proc = NULL;
4506 node->local_strong_refs = 0;
4507 node->local_weak_refs = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004508 binder_inner_proc_unlock(proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004509
4510 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004511 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004512 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004513
4514 hlist_for_each_entry(ref, &node->refs, node_entry) {
4515 refs++;
4516
4517 if (!ref->death)
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08004518 continue;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004519
4520 death++;
4521
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004522 binder_inner_proc_lock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004523 if (list_empty(&ref->death->work.entry)) {
4524 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004525 binder_enqueue_work_ilocked(&ref->death->work,
4526 &ref->proc->todo);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004527 wake_up_interruptible(&ref->proc->wait);
4528 } else
4529 BUG();
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004530 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004531 }
4532
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004533 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4534 "node %d now dead, refs %d, death %d\n",
4535 node->debug_id, refs, death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004536 binder_node_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004537 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004538
4539 return refs;
4540}
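
/*
 * A node whose owning proc is going away but which still has remote
 * refs is moved onto binder_dead_nodes above rather than freed; each
 * holder that asked for a death notification gets
 * BINDER_WORK_DEAD_BINDER queued on its own proc->todo, which is
 * what ultimately surfaces as BR_DEAD_BINDER in binder_thread_read().
 */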
4541
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004542static void binder_deferred_release(struct binder_proc *proc)
4543{
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004544 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004545 struct rb_node *n;
Todd Kjosd325d372016-10-10 10:40:53 -07004546 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004547
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004548 BUG_ON(proc->files);
4549
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004550 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004551 hlist_del(&proc->proc_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004552 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004553
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004554 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004555 if (context->binder_context_mgr_node &&
4556 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004557 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004558 "%s: %d context_mgr_node gone\n",
4559 __func__, proc->pid);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004560 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004561 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004562 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjosb4827902017-05-25 15:52:17 -07004563 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004564 /*
4565 * Make sure proc stays alive after we
4566 * remove all the threads
4567 */
4568 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004569
Todd Kjos2f993e22017-05-12 14:42:55 -07004570 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004571 threads = 0;
4572 active_transactions = 0;
4573 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004574 struct binder_thread *thread;
4575
4576 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004577 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004578 threads++;
Todd Kjos2f993e22017-05-12 14:42:55 -07004579 active_transactions += binder_thread_release(proc, thread);
Todd Kjosb4827902017-05-25 15:52:17 -07004580 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004581 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004582
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004583 nodes = 0;
4584 incoming_refs = 0;
4585 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004586 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004587
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004588 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004589 nodes++;
Todd Kjosf22abc72017-05-09 11:08:05 -07004590 /*
4591 * take a temporary ref on the node before
4592 * calling binder_node_release() which will either
4593 * kfree() the node or call binder_put_node()
4594 */
Todd Kjos425d23f2017-06-12 12:07:26 -07004595 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004596 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjos425d23f2017-06-12 12:07:26 -07004597 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004598 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjos425d23f2017-06-12 12:07:26 -07004599 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004600 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004601 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004602
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004603 outgoing_refs = 0;
4604 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004605 struct binder_ref *ref;
4606
4607 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004608 outgoing_refs++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07004609 binder_cleanup_ref(ref);
4610 binder_free_ref(ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004611 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004612
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004613 binder_release_work(proc, &proc->todo);
4614 binder_release_work(proc, &proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004615
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004616 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjosd325d372016-10-10 10:40:53 -07004617 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004618 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjosd325d372016-10-10 10:40:53 -07004619 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004620
Todd Kjos2f993e22017-05-12 14:42:55 -07004621 binder_proc_dec_tmpref(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004622}
4623
4624static void binder_deferred_func(struct work_struct *work)
4625{
4626 struct binder_proc *proc;
4627 struct files_struct *files;
4628
4629 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09004630
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004631 do {
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004632 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004633 mutex_lock(&binder_deferred_lock);
4634 if (!hlist_empty(&binder_deferred_list)) {
4635 proc = hlist_entry(binder_deferred_list.first,
4636 struct binder_proc, deferred_work_node);
4637 hlist_del_init(&proc->deferred_work_node);
4638 defer = proc->deferred_work;
4639 proc->deferred_work = 0;
4640 } else {
4641 proc = NULL;
4642 defer = 0;
4643 }
4644 mutex_unlock(&binder_deferred_lock);
4645
4646 files = NULL;
4647 if (defer & BINDER_DEFERRED_PUT_FILES) {
4648 files = proc->files;
4649 if (files)
4650 proc->files = NULL;
4651 }
4652
4653 if (defer & BINDER_DEFERRED_FLUSH)
4654 binder_deferred_flush(proc);
4655
4656 if (defer & BINDER_DEFERRED_RELEASE)
4657 binder_deferred_release(proc); /* frees proc */
4658
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004659 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004660 if (files)
4661 put_files_struct(files);
4662 } while (proc);
4663}
4664static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4665
4666static void
4667binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4668{
4669 mutex_lock(&binder_deferred_lock);
4670 proc->deferred_work |= defer;
4671 if (hlist_unhashed(&proc->deferred_work_node)) {
4672 hlist_add_head(&proc->deferred_work_node,
4673 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05304674 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004675 }
4676 mutex_unlock(&binder_deferred_lock);
4677}
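
/*
 * binder_defer_work() coalesces requests: deferred_work is a
 * bitmask, so flushing the same proc twice before the workqueue runs
 * results in a single pass of binder_deferred_func() handling both
 * bits, and hlist_unhashed() ensures the proc is queued at most once.
 */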
4678
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004679static void print_binder_transaction(struct seq_file *m, const char *prefix,
4680 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004681{
Todd Kjos2f993e22017-05-12 14:42:55 -07004682 spin_lock(&t->lock);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004683 seq_printf(m,
4684 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4685 prefix, t->debug_id, t,
4686 t->from ? t->from->proc->pid : 0,
4687 t->from ? t->from->pid : 0,
4688 t->to_proc ? t->to_proc->pid : 0,
4689 t->to_thread ? t->to_thread->pid : 0,
4690 t->code, t->flags, t->priority, t->need_reply);
Todd Kjos2f993e22017-05-12 14:42:55 -07004691 spin_unlock(&t->lock);
4692
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004693 if (t->buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004694 seq_puts(m, " buffer free\n");
4695 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004696 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004697 if (t->buffer->target_node)
4698 seq_printf(m, " node %d",
4699 t->buffer->target_node->debug_id);
4700 seq_printf(m, " size %zd:%zd data %p\n",
4701 t->buffer->data_size, t->buffer->offsets_size,
4702 t->buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004703}
4704
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004705static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
4706 const char *transaction_prefix,
4707 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004708{
4709 struct binder_node *node;
4710 struct binder_transaction *t;
4711
4712 switch (w->type) {
4713 case BINDER_WORK_TRANSACTION:
4714 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004715 print_binder_transaction(m, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004716 break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004717 case BINDER_WORK_RETURN_ERROR: {
4718 struct binder_error *e = container_of(
4719 w, struct binder_error, work);
4720
4721 seq_printf(m, "%stransaction error: %u\n",
4722 prefix, e->cmd);
4723 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004724 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004725 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004726 break;
4727 case BINDER_WORK_NODE:
4728 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08004729 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
4730 prefix, node->debug_id,
4731 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004732 break;
4733 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004734 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004735 break;
4736 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004737 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004738 break;
4739 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004740 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004741 break;
4742 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004743 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004744 break;
4745 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004746}
4747
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004748static void print_binder_thread_ilocked(struct seq_file *m,
4749 struct binder_thread *thread,
4750 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004751{
4752 struct binder_transaction *t;
4753 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004754 size_t start_pos = m->count;
4755 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004756
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004757 WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
Todd Kjos2f993e22017-05-12 14:42:55 -07004758 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
Todd Kjos6798e6d2017-01-06 14:19:25 -08004759 thread->pid, thread->looper,
Todd Kjos2f993e22017-05-12 14:42:55 -07004760 thread->looper_need_return,
4761 atomic_read(&thread->tmp_ref));
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004762 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004763 t = thread->transaction_stack;
4764 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004765 if (t->from == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004766 print_binder_transaction(m,
4767 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004768 t = t->from_parent;
4769 } else if (t->to_thread == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004770 print_binder_transaction(m,
4771 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004772 t = t->to_parent;
4773 } else {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004774 print_binder_transaction(m, " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004775 t = NULL;
4776 }
4777 }
4778 list_for_each_entry(w, &thread->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004779 print_binder_work_ilocked(m, " ",
4780 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004781 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004782 if (!print_always && m->count == header_pos)
4783 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004784}
4785
Todd Kjos425d23f2017-06-12 12:07:26 -07004786static void print_binder_node_nilocked(struct seq_file *m,
4787 struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004788{
4789 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004790 struct binder_work *w;
4791 int count;
4792
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004793 WARN_ON(!spin_is_locked(&node->lock));
Todd Kjos425d23f2017-06-12 12:07:26 -07004794 if (node->proc)
4795 WARN_ON(!spin_is_locked(&node->proc->inner_lock));
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004796
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004797 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08004798 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004799 count++;
4800
Todd Kjosf22abc72017-05-09 11:08:05 -07004801 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
Arve Hjønnevågda498892014-02-21 14:40:26 -08004802 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004803 node->has_strong_ref, node->has_weak_ref,
4804 node->local_strong_refs, node->local_weak_refs,
Todd Kjosf22abc72017-05-09 11:08:05 -07004805 node->internal_strong_refs, count, node->tmp_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004806 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004807 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08004808 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004809 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004810 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004811 seq_puts(m, "\n");
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004812 if (node->proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004813 list_for_each_entry(w, &node->async_todo, entry)
4814 print_binder_work_ilocked(m, " ",
4815 " pending async transaction", w);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004816 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004817}
4818
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004819static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004820{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004821 binder_node_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07004822 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4823 ref->data.debug_id, ref->data.desc,
4824 ref->node->proc ? "" : "dead ",
4825 ref->node->debug_id, ref->data.strong,
4826 ref->data.weak, ref->death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004827 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004828}
4829
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004830static void print_binder_proc(struct seq_file *m,
4831 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004832{
4833 struct binder_work *w;
4834 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004835 size_t start_pos = m->count;
4836 size_t header_pos;
Todd Kjos425d23f2017-06-12 12:07:26 -07004837 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004838
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004839 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004840 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004841 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004842
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004843 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004844 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004845 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004846 rb_node), print_all);
Todd Kjos425d23f2017-06-12 12:07:26 -07004847
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004848 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004849 struct binder_node *node = rb_entry(n, struct binder_node,
4850 rb_node);
Todd Kjos425d23f2017-06-12 12:07:26 -07004851 /*
4852 * take a temporary reference on the node so it
4853 * survives and isn't removed from the tree
4854 * while we print it.
4855 */
4856 binder_inc_node_tmpref_ilocked(node);
4857 /* Need to drop inner lock to take node lock */
4858 binder_inner_proc_unlock(proc);
4859 if (last_node)
4860 binder_put_node(last_node);
4861 binder_node_inner_lock(node);
4862 print_binder_node_nilocked(m, node);
4863 binder_node_inner_unlock(node);
4864 last_node = node;
4865 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004866 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004867 binder_inner_proc_unlock(proc);
4868 if (last_node)
4869 binder_put_node(last_node);
4870
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004871 if (print_all) {
4872 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004873 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004874 n = rb_next(n))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004875 print_binder_ref(m, rb_entry(n, struct binder_ref,
4876 rb_node_desc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004877 }
Todd Kjosd325d372016-10-10 10:40:53 -07004878 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004879 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004880 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004881 print_binder_work_ilocked(m, " ", " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004882 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004883 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004884 break;
4885 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004886 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004887 if (!print_all && m->count == header_pos)
4888 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004889}
4890
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004891static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004892 "BR_ERROR",
4893 "BR_OK",
4894 "BR_TRANSACTION",
4895 "BR_REPLY",
4896 "BR_ACQUIRE_RESULT",
4897 "BR_DEAD_REPLY",
4898 "BR_TRANSACTION_COMPLETE",
4899 "BR_INCREFS",
4900 "BR_ACQUIRE",
4901 "BR_RELEASE",
4902 "BR_DECREFS",
4903 "BR_ATTEMPT_ACQUIRE",
4904 "BR_NOOP",
4905 "BR_SPAWN_LOOPER",
4906 "BR_FINISHED",
4907 "BR_DEAD_BINDER",
4908 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4909 "BR_FAILED_REPLY"
4910};
4911
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004912static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004913 "BC_TRANSACTION",
4914 "BC_REPLY",
4915 "BC_ACQUIRE_RESULT",
4916 "BC_FREE_BUFFER",
4917 "BC_INCREFS",
4918 "BC_ACQUIRE",
4919 "BC_RELEASE",
4920 "BC_DECREFS",
4921 "BC_INCREFS_DONE",
4922 "BC_ACQUIRE_DONE",
4923 "BC_ATTEMPT_ACQUIRE",
4924 "BC_REGISTER_LOOPER",
4925 "BC_ENTER_LOOPER",
4926 "BC_EXIT_LOOPER",
4927 "BC_REQUEST_DEATH_NOTIFICATION",
4928 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02004929 "BC_DEAD_BINDER_DONE",
4930 "BC_TRANSACTION_SG",
4931 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004932};
4933
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004934static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004935 "proc",
4936 "thread",
4937 "node",
4938 "ref",
4939 "death",
4940 "transaction",
4941 "transaction_complete"
4942};
4943
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

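/*
 * One "proc <pid>" block in the stats file: thread and node counts are
 * taken under the inner lock, followed by ref counts, allocated buffers,
 * and the number of BINDER_WORK_TRANSACTION items still sitting on the
 * proc todo list, then the per-proc command/return counters.
 */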
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads: %d\n"
			"  free async space: %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

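/*
 * debugfs "state" show handler: dump the global dead-node list, then
 * the full state of every binder_proc.  A dead node is pinned with
 * node->tmp_refs before binder_dead_nodes_lock is dropped, so it cannot
 * be freed while it is being printed; the previous node is released
 * only once the next one is pinned.
 *
 * Typical usage, assuming debugfs is mounted in the usual place:
 *
 *	cat /sys/kernel/debug/binder/state
 */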
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

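/*
 * debugfs "stats" show handler: global counters first, then a
 * per-process section for every entry on binder_procs.
 */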
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

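/*
 * debugfs "transactions" show handler: the same walk as "state" but
 * with print_all == 0, so a proc that contributes nothing after its
 * header is rewound away (see the m->count rewind at the tail of
 * print_binder_proc above).
 */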
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

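/*
 * Show handler for the per-pid entries under debugfs "proc/": the pid
 * is carried in m->private, and every matching proc is printed, since
 * the same pid may have more than one binder context open.
 */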
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	binder_lock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}

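/*
 * Transaction log entries are filled in without a lock, so a reader can
 * observe a half-written entry.  The writer stamps e->debug_id_done
 * last; by sampling it before and after printing (each read fenced with
 * smp_rmb(), pairing with the writer's write barrier) we can tag a torn
 * entry as "(incomplete)" instead of presenting garbage as fact.
 */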
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

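/*
 * Show handler for "transaction_log" and "failed_transaction_log":
 * replay the ring oldest-first.  log->cur only ever increments
 * (binder_init seeds it with ~0U so the first entry lands in slot 0);
 * once the ring has wrapped, the oldest entry is the one just past the
 * newest.
 */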
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

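/*
 * The character-device interface itself.  There is intentionally no
 * .read/.write: all control traffic goes through binder_ioctl (reused
 * unchanged as the compat handler, since the UAPI uses fixed-size
 * types) and bulk data arrives via the mmap'ed buffer area.
 */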
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

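/*
 * Allocate and register one misc device node for a binder context.
 * Each context is self-contained (its own context manager, its own uid
 * gate), which is what lets Android keep e.g. framework and vendor IPC
 * on separate device nodes.
 */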
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

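/*
 * Module init: seed both transaction logs, build the debugfs tree
 * (failure there is deliberately ignored), then register one device per
 * comma-separated name in binder_devices_param.  That string comes from
 * a module parameter defined earlier in this file; on Android kernels
 * it typically reads "binder,hwbinder,vndbinder".
 */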
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");