/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo), as well as
 *    thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate the
 * required lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 *
 * An illustrative sketch of the required acquisition order follows
 * the lock helper functions below.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

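/*
 * The transaction log above is a lock-free ring buffer: a writer
 * claims a slot with atomic_inc_return() and publishes
 * e->debug_id_done last, once the entry is complete. A reader must
 * pair the writer's smp_wmb() with a read barrier before trusting the
 * other fields. The helper below is an illustrative sketch only (the
 * real consumer is the debugfs show code later in this file); it
 * assumes a non-zero debug_id_done marks a fully written entry.
 */
static inline bool binder_transaction_log_entry_complete(
		struct binder_transaction_log_entry *e)
{
	int debug_id_done = READ_ONCE(e->debug_id_done);

	/* pairs with smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	return debug_id_done != 0;
}
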
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 * @refs_by_node:         rbtree of refs ordered by ref->node
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 * @requested_threads_started: number of binder threads started
 * @ready_threads:        number of threads waiting for proc work
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

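/*
 * Illustrative sketch (not part of the driver): when more than one of
 * the locks above is needed, they must be taken in the documented
 * order -- proc->outer_lock, then node->lock, then proc->inner_lock --
 * and released in reverse. The hypothetical helper below exists only
 * to demonstrate that ordering.
 */
static void __maybe_unused binder_lock_order_example(struct binder_proc *proc,
						     struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... fields protected by all three locks may be touched here ... */

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}
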
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

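/*
 * A minimal usage sketch of the helpers above (illustrative only;
 * real call sites appear throughout the driver): queue a work item on
 * a proc's todo list under the inner lock and wake a waiter. The
 * wake_up_interruptible() call mirrors what the driver does after
 * queueing work, e.g. in binder_dec_node_nilocked() below.
 */
static void __maybe_unused binder_queue_proc_work_example(
		struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, &proc->todo);
	binder_inner_proc_unlock(proc);
	wake_up_interruptible(&proc->wait);
}
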
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

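/*
 * Sketch of how the three fd helpers above are meant to be used
 * together (illustrative only; the real caller is the BINDER_TYPE_FD
 * translation code later in the driver): reserve a descriptor in the
 * target process, then bind the struct file to it; a later error path
 * would undo this with task_close_fd().
 */
static int __maybe_unused binder_fd_helpers_example(struct binder_proc *proc,
						    struct file *file)
{
	int fd = task_get_unused_fd_flags(proc, O_CLOEXEC);

	if (fd < 0)
		return fd;	/* -ESRCH or -EMFILE */
	task_fd_install(proc, fd, file);
	/* on a subsequent failure, task_close_fd(proc, fd) undoes this */
	return fd;
}
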
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	BUG_ON(!spin_is_locked(&proc->inner_lock));

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	BUG_ON(!spin_is_locked(&proc->inner_lock));
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

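/*
 * A minimal sketch (illustrative only) of the temporary-reference
 * pattern the helpers above implement: binder_get_node() returns the
 * node with a tmp_ref already held, which keeps it alive while only a
 * local variable points at it; binder_put_node() drops that reference
 * and frees the node if it was the last one.
 */
static void __maybe_unused binder_tmpref_pattern_example(
		struct binder_proc *proc, binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return;
	/* ... safely use *node here, even as other refs come and go ... */
	binder_put_node(node);
}
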
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

Todd Kjosb0117bb2017-05-08 09:16:27 -07001291/**
1292 * binder_get_ref_for_node() - get the ref associated with given node
1293 * @proc: binder_proc that owns the ref
1294 * @node: binder_node of target
1295 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1296 *
1297 * Look up the ref for the given node and return it if it exists
1298 *
1299 * If it doesn't exist and the caller provides a newly allocated
1300 * ref, initialize the fields of the newly allocated ref and insert
1301 * into the given proc rb_trees and node refs list.
1302 *
1303 * Return: the ref for node. It is possible that another thread
1304 * allocated/initialized the ref first in which case the
1305 * returned ref would be different than the passed-in
1306 * new_ref. new_ref must be kfree'd by the caller in
1307 * this case.
1308 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001309static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
Todd Kjosb0117bb2017-05-08 09:16:27 -07001310 struct binder_node *node,
1311 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001312{
Todd Kjosb0117bb2017-05-08 09:16:27 -07001313 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001314 struct rb_node **p = &proc->refs_by_node.rb_node;
1315 struct rb_node *parent = NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001316 struct binder_ref *ref;
1317 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001318
1319 while (*p) {
1320 parent = *p;
1321 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1322
1323 if (node < ref->node)
1324 p = &(*p)->rb_left;
1325 else if (node > ref->node)
1326 p = &(*p)->rb_right;
1327 else
1328 return ref;
1329 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001330 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001331 return NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001332
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001333 binder_stats_created(BINDER_STAT_REF);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001334 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001335 new_ref->proc = proc;
1336 new_ref->node = node;
1337 rb_link_node(&new_ref->rb_node_node, parent, p);
1338 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1339
Todd Kjosb0117bb2017-05-08 09:16:27 -07001340 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001341 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1342 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001343 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001344 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001345 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001346 }
1347
1348 p = &proc->refs_by_desc.rb_node;
1349 while (*p) {
1350 parent = *p;
1351 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1352
Todd Kjosb0117bb2017-05-08 09:16:27 -07001353 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001354 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001355 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001356 p = &(*p)->rb_right;
1357 else
1358 BUG();
1359 }
1360 rb_link_node(&new_ref->rb_node_desc, parent, p);
1361 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001362
1363 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001364 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001365
Todd Kjos4cbe5752017-05-01 17:21:51 -07001366 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1367 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001368 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001369 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001370 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001371 return new_ref;
1372}
1373
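/*
 * Illustrative sketch (added here; not part of the upstream driver):
 * the expected calling pattern for binder_get_ref_for_node(), which
 * mirrors binder_inc_ref_for_node() further below. The caller
 * allocates a candidate ref up front and frees it on race loss:
 *
 *	new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *	if (!new_ref)
 *		return -ENOMEM;
 *	ref = binder_get_ref_for_node(proc, node, new_ref);
 *	if (ref != new_ref)
 *		kfree(new_ref);	// another thread won; use ref
 */
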
Todd Kjosb0117bb2017-05-08 09:16:27 -07001374static void binder_cleanup_ref(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001375{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001376 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001377
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001378 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301379 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001380 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301381 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001382
1383 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1384 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001385
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001386 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001387 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001388 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001389
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001390 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001391 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1392 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001393 /*
1394 * Clear ref->node unless we want the caller to free the node
1395 */
1396 if (!delete_node) {
1397 /*
1398 * The caller uses ref->node to determine
1399 * whether the node needs to be freed. Clear
1400 * it since the node is still alive.
1401 */
1402 ref->node = NULL;
1403 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001404
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001405 if (ref->death) {
1406 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301407 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001408 ref->proc->pid, ref->data.debug_id,
1409 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001410 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001411 binder_stats_deleted(BINDER_STAT_DEATH);
1412 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001413 binder_stats_deleted(BINDER_STAT_REF);
1414}
1415
Todd Kjosb0117bb2017-05-08 09:16:27 -07001416/**
1417 * binder_inc_ref() - increment the given ref
1418 * @ref: ref to be incremented
1419 * @strong: if true, strong increment, else weak
1420 * @target_list: list to queue node work on
1421 *
1422 * Increment the ref.
1423 *
1424 * Return: 0 if successful, else errno
1425 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001426static int binder_inc_ref(struct binder_ref *ref, int strong,
1427 struct list_head *target_list)
1428{
1429 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001430
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001431 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001432 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001433 ret = binder_inc_node(ref->node, 1, 1, target_list);
1434 if (ret)
1435 return ret;
1436 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001437 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001438 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001439 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001440 ret = binder_inc_node(ref->node, 0, 1, target_list);
1441 if (ret)
1442 return ret;
1443 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001444 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001445 }
1446 return 0;
1447}
1448
Todd Kjosb0117bb2017-05-08 09:16:27 -07001449/**
1450 * binder_dec_ref() - decrement the given ref
1451 * @ref: ref to be decremented
1452 * @strong: if true, strong decrement, else weak
1453 *
1454 * Decrement the ref.
1455 *
1456 * TODO: kfree is avoided here since an upcoming patch
1457 * will put this under a lock.
1458 *
1459 * Return: true if ref is cleaned up and ready to be freed
1460 */
1461static bool binder_dec_ref(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001462{
1463 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001464 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301465 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001466 ref->proc->pid, ref->data.debug_id,
1467 ref->data.desc, ref->data.strong,
1468 ref->data.weak);
1469 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001470 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001471 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001472 if (ref->data.strong == 0)
1473 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001474 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001475 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301476 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001477 ref->proc->pid, ref->data.debug_id,
1478 ref->data.desc, ref->data.strong,
1479 ref->data.weak);
1480 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001481 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001482 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001483 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001484 if (ref->data.strong == 0 && ref->data.weak == 0) {
1485 binder_cleanup_ref(ref);
1486 /*
1487 * TODO: we could kfree(ref) here, but an upcoming
1488 * patch will call this with a lock held, so we
1489 * return an indication that the ref should be
1490 * freed.
1491 */
1492 return true;
1493 }
1494 return false;
1495}
1496
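/*
 * Illustrative sketch (added here; not part of the upstream driver):
 * because binder_dec_ref() defers kfree(), callers must free the ref
 * themselves once it reports the ref is dead, exactly as
 * binder_update_ref_for_handle() does further below:
 *
 *	delete_ref = binder_dec_ref(ref, strong);
 *	if (delete_ref)
 *		binder_free_ref(ref);
 */
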
1497/**
1498 * binder_get_node_from_ref() - get the node from the given proc/desc
1499 * @proc: proc containing the ref
1500 * @desc: the handle associated with the ref
1501 * @need_strong_ref: if true, only return node if ref is strong
1502 * @rdata: the id/refcount data for the ref
1503 *
1504 * Given a proc and ref handle, return the associated binder_node
1505 *
1506 * Return: a binder_node, or NULL if not found or if only a weak ref exists when a strong ref is required
1507 */
1508static struct binder_node *binder_get_node_from_ref(
1509 struct binder_proc *proc,
1510 u32 desc, bool need_strong_ref,
1511 struct binder_ref_data *rdata)
1512{
1513 struct binder_node *node;
1514 struct binder_ref *ref;
1515
1516 ref = binder_get_ref(proc, desc, need_strong_ref);
1517 if (!ref)
1518 goto err_no_ref;
1519 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001520 /*
1521 * Take an implicit reference on the node to ensure
1522 * it stays alive until the call to binder_put_node()
1523 */
1524 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001525 if (rdata)
1526 *rdata = ref->data;
1527
1528 return node;
1529
1530err_no_ref:
1531 return NULL;
1532}
1533
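/*
 * Illustrative sketch (added here; not part of the upstream driver):
 * the node returned by binder_get_node_from_ref() carries an implicit
 * tmp reference, so every successful lookup must be paired with
 * binder_put_node(), as binder_translate_handle() does later on:
 *
 *	node = binder_get_node_from_ref(proc, desc, true, &rdata);
 *	if (!node)
 *		return -EINVAL;
 *	// ... use node ...
 *	binder_put_node(node);
 */
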
1534/**
1535 * binder_free_ref() - free the binder_ref
1536 * @ref: ref to free
1537 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001538 * Free the binder_ref. Free the binder_node indicated by ref->node
1539 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001540 */
1541static void binder_free_ref(struct binder_ref *ref)
1542{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001543 if (ref->node)
1544 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001545 kfree(ref->death);
1546 kfree(ref);
1547}
1548
1549/**
1550 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1551 * @proc: proc containing the ref
1552 * @desc: the handle associated with the ref
1553 * @increment: true=inc reference, false=dec reference
1554 * @strong: true=strong reference, false=weak reference
1555 * @rdata: the id/refcount data for the ref
1556 *
1557 * Given a proc and ref handle, increment or decrement the ref
1558 * according to "increment" arg.
1559 *
1560 * Return: 0 if successful, else errno
1561 */
1562static int binder_update_ref_for_handle(struct binder_proc *proc,
1563 uint32_t desc, bool increment, bool strong,
1564 struct binder_ref_data *rdata)
1565{
1566 int ret = 0;
1567 struct binder_ref *ref;
1568 bool delete_ref = false;
1569
1570 ref = binder_get_ref(proc, desc, strong);
1571 if (!ref) {
1572 ret = -EINVAL;
1573 goto err_no_ref;
1574 }
1575 if (increment)
1576 ret = binder_inc_ref(ref, strong, NULL);
1577 else
1578 delete_ref = binder_dec_ref(ref, strong);
1579
1580 if (rdata)
1581 *rdata = ref->data;
1582
1583 if (delete_ref)
1584 binder_free_ref(ref);
1585 return ret;
1586
1587err_no_ref:
1588 return ret;
1589}
1590
1591/**
1592 * binder_dec_ref_for_handle() - decrement the ref for given handle
1593 * @proc: proc containing the ref
1594 * @desc: the handle associated with the ref
1595 * @strong: true=strong reference, false=weak reference
1596 * @rdata: the id/refcount data for the ref
1597 *
1598 * Just calls binder_update_ref_for_handle() to decrement the ref.
1599 *
1600 * Return: 0 if successful, else errno
1601 */
1602static int binder_dec_ref_for_handle(struct binder_proc *proc,
1603 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1604{
1605 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1606}
1607
1608
1609/**
1610 * binder_inc_ref_for_node() - increment the ref for given proc/node
1611 * @proc: proc containing the ref
1612 * @node: target node
1613 * @strong: true=strong reference, false=weak reference
1614 * @target_list: worklist to use if node is incremented
1615 * @rdata: the id/refcount data for the ref
1616 *
1617 * Given a proc and node, increment the ref. Create the ref if it
1618 * doesn't already exist
1619 *
1620 * Return: 0 if successful, else errno
1621 */
1622static int binder_inc_ref_for_node(struct binder_proc *proc,
1623 struct binder_node *node,
1624 bool strong,
1625 struct list_head *target_list,
1626 struct binder_ref_data *rdata)
1627{
1628 struct binder_ref *ref;
1629 struct binder_ref *new_ref = NULL;
1630 int ret = 0;
1631
1632 ref = binder_get_ref_for_node(proc, node, NULL);
1633 if (!ref) {
1634 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1635 if (!new_ref)
1636 return -ENOMEM;
1637 ref = binder_get_ref_for_node(proc, node, new_ref);
1638 }
1639 ret = binder_inc_ref(ref, strong, target_list);
1640 *rdata = ref->data;
1641 if (new_ref && ref != new_ref)
1642 /*
1643 * Another thread created the ref first so
1644 * free the one we allocated
1645 */
1646 kfree(new_ref);
1647 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001648}
1649
Martijn Coenen995a36e2017-06-02 13:36:52 -07001650static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1651 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001652{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001653 BUG_ON(!target_thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07001654 BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
Todd Kjos21ef40a2017-03-30 18:02:13 -07001655 BUG_ON(target_thread->transaction_stack != t);
1656 BUG_ON(target_thread->transaction_stack->from != target_thread);
1657 target_thread->transaction_stack =
1658 target_thread->transaction_stack->from_parent;
1659 t->from = NULL;
1660}
1661
Todd Kjos2f993e22017-05-12 14:42:55 -07001662/**
1663 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1664 * @thread: thread to decrement
1665 *
1666 * A thread needs to be kept alive while being used to create or
1667 * handle a transaction. binder_get_txn_from() is used to safely
1668 * extract t->from from a binder_transaction and keep the thread
1669 * indicated by t->from from being freed. When done with that
1670 * binder_thread, this function is called to decrement the
1671 * tmp_ref and free if appropriate (thread has been released
1672 * and no transaction is being processed by the driver)
1673 */
1674static void binder_thread_dec_tmpref(struct binder_thread *thread)
1675{
1676 /*
1677 * atomic is used to protect the counter value; the inner lock
1678 * ensures the is_dead check and the final free cannot race
Todd Kjos2f993e22017-05-12 14:42:55 -07001679 */
Todd Kjosb4827902017-05-25 15:52:17 -07001680 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001681 atomic_dec(&thread->tmp_ref);
1682 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07001683 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001684 binder_free_thread(thread);
1685 return;
1686 }
Todd Kjosb4827902017-05-25 15:52:17 -07001687 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001688}
1689
1690/**
1691 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1692 * @proc: proc to decrement
1693 *
1694 * A binder_proc needs to be kept alive while being used to create or
1695 * handle a transaction. proc->tmp_ref is incremented when
1696 * creating a new transaction or the binder_proc is currently in-use
1697 * by threads that are being released. When done with the binder_proc,
1698 * this function is called to decrement the counter and free the
1699 * proc if appropriate (proc has been released, all threads have
1700 * been released and not currenly in-use to process a transaction).
1701 */
1702static void binder_proc_dec_tmpref(struct binder_proc *proc)
1703{
Todd Kjosb4827902017-05-25 15:52:17 -07001704 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001705 proc->tmp_ref--;
1706 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1707 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07001708 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001709 binder_free_proc(proc);
1710 return;
1711 }
Todd Kjosb4827902017-05-25 15:52:17 -07001712 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001713}
1714
1715/**
1716 * binder_get_txn_from() - safely extract the "from" thread in transaction
1717 * @t: binder transaction for t->from
1718 *
1719 * Atomically return the "from" thread and increment the tmp_ref
1720 * count for the thread to ensure it stays alive until
1721 * binder_thread_dec_tmpref() is called.
1722 *
1723 * Return: the value of t->from
1724 */
1725static struct binder_thread *binder_get_txn_from(
1726 struct binder_transaction *t)
1727{
1728 struct binder_thread *from;
1729
1730 spin_lock(&t->lock);
1731 from = t->from;
1732 if (from)
1733 atomic_inc(&from->tmp_ref);
1734 spin_unlock(&t->lock);
1735 return from;
1736}
1737
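/*
 * Illustrative sketch (added here; not part of the upstream driver):
 * the typical pairing of binder_get_txn_from() with
 * binder_thread_dec_tmpref(), per the comment above:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		// ... from->proc and from->pid are safe to use ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */
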
Martijn Coenen995a36e2017-06-02 13:36:52 -07001738/**
1739 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1740 * @t: binder transaction for t->from
1741 *
1742 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1743 * to guarantee that the thread cannot be released while operating on it.
1744 * The caller must call binder_inner_proc_unlock() to release the inner lock
1745 * as well as call binder_thread_dec_tmpref() to release the reference.
1746 *
1747 * Return: the value of t->from
1748 */
1749static struct binder_thread *binder_get_txn_from_and_acq_inner(
1750 struct binder_transaction *t)
1751{
1752 struct binder_thread *from;
1753
1754 from = binder_get_txn_from(t);
1755 if (!from)
1756 return NULL;
1757 binder_inner_proc_lock(from->proc);
1758 if (t->from) {
1759 BUG_ON(from != t->from);
1760 return from;
1761 }
1762 binder_inner_proc_unlock(from->proc);
1763 binder_thread_dec_tmpref(from);
1764 return NULL;
1765}
1766
Todd Kjos21ef40a2017-03-30 18:02:13 -07001767static void binder_free_transaction(struct binder_transaction *t)
1768{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001769 if (t->buffer)
1770 t->buffer->transaction = NULL;
1771 kfree(t);
1772 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1773}
1774
1775static void binder_send_failed_reply(struct binder_transaction *t,
1776 uint32_t error_code)
1777{
1778 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001779 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09001780
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001781 BUG_ON(t->flags & TF_ONE_WAY);
1782 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07001783 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001784 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07001785 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1786 "send failed reply for transaction %d to %d:%d\n",
1787 t->debug_id,
1788 target_thread->proc->pid,
1789 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001790
Martijn Coenen995a36e2017-06-02 13:36:52 -07001791 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07001792 if (target_thread->reply_error.cmd == BR_OK) {
1793 target_thread->reply_error.cmd = error_code;
Martijn Coenen995a36e2017-06-02 13:36:52 -07001794 binder_enqueue_work_ilocked(
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001795 &target_thread->reply_error.work,
Todd Kjos858b8da2017-04-21 17:35:12 -07001796 &target_thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001797 wake_up_interruptible(&target_thread->wait);
1798 } else {
Todd Kjos858b8da2017-04-21 17:35:12 -07001799 WARN(1, "Unexpected reply error: %u\n",
1800 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001801 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07001802 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001803 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07001804 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001805 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001806 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001807 next = t->from_parent;
1808
1809 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1810 "send failed reply for transaction %d, target dead\n",
1811 t->debug_id);
1812
Todd Kjos21ef40a2017-03-30 18:02:13 -07001813 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001814 if (next == NULL) {
1815 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1816 "reply failed, no target thread at root\n");
1817 return;
1818 }
1819 t = next;
1820 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1821 "reply failed, no target thread -- retry %d\n",
1822 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001823 }
1824}
1825
Martijn Coenen00c80372016-07-13 12:06:49 +02001826/**
1827 * binder_validate_object() - checks for a valid metadata object in a buffer.
1828 * @buffer: binder_buffer that we're parsing.
1829 * @offset: offset in the buffer at which to validate an object.
1830 *
1831 * Return: If there's a valid metadata object at @offset in @buffer, the
1832 * size of that object. Otherwise, it returns zero.
1833 */
1834static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1835{
1836 /* Check if we can read a header first */
1837 struct binder_object_header *hdr;
1838 size_t object_size = 0;
1839
1840 if (offset > buffer->data_size - sizeof(*hdr) ||
1841 buffer->data_size < sizeof(*hdr) ||
1842 !IS_ALIGNED(offset, sizeof(u32)))
1843 return 0;
1844
1845 /* Ok, now see if we can read a complete object. */
1846 hdr = (struct binder_object_header *)(buffer->data + offset);
1847 switch (hdr->type) {
1848 case BINDER_TYPE_BINDER:
1849 case BINDER_TYPE_WEAK_BINDER:
1850 case BINDER_TYPE_HANDLE:
1851 case BINDER_TYPE_WEAK_HANDLE:
1852 object_size = sizeof(struct flat_binder_object);
1853 break;
1854 case BINDER_TYPE_FD:
1855 object_size = sizeof(struct binder_fd_object);
1856 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02001857 case BINDER_TYPE_PTR:
1858 object_size = sizeof(struct binder_buffer_object);
1859 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02001860 case BINDER_TYPE_FDA:
1861 object_size = sizeof(struct binder_fd_array_object);
1862 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02001863 default:
1864 return 0;
1865 }
1866 if (offset <= buffer->data_size - object_size &&
1867 buffer->data_size >= object_size)
1868 return object_size;
1869 else
1870 return 0;
1871}
1872
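/*
 * Illustrative sketch (added here; not part of the upstream driver):
 * callers walk the offset array and treat a zero return as a bad
 * object, as binder_transaction_buffer_release() does below:
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(buffer, *offp);
 *
 *		if (object_size == 0)
 *			continue;	// bad object: report and skip
 *		// ... handle the object at buffer->data + *offp ...
 *	}
 */
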
Martijn Coenen5a6da532016-09-30 14:10:07 +02001873/**
1874 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1875 * @b: binder_buffer containing the object
1876 * @index: index in offset array at which the binder_buffer_object is
1877 * located
1878 * @start: points to the start of the offset array
1879 * @num_valid: the number of valid offsets in the offset array
1880 *
1881 * Return: If @index is within the valid range of the offset array
1882 * described by @start and @num_valid, and if there's a valid
1883 * binder_buffer_object at the offset found in index @index
1884 * of the offset array, that object is returned. Otherwise,
1885 * %NULL is returned.
1886 * Note that the offset found in index @index itself is not
1887 * verified; this function assumes that @num_valid elements
1888 * from @start were previously verified to have valid offsets.
1889 */
1890static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1891 binder_size_t index,
1892 binder_size_t *start,
1893 binder_size_t num_valid)
1894{
1895 struct binder_buffer_object *buffer_obj;
1896 binder_size_t *offp;
1897
1898 if (index >= num_valid)
1899 return NULL;
1900
1901 offp = start + index;
1902 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1903 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1904 return NULL;
1905
1906 return buffer_obj;
1907}
1908
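/*
 * Illustrative sketch (added here; not part of the upstream driver):
 * resolving the parent of a binder_fd_array_object, as the
 * BINDER_TYPE_FDA handling further below does:
 *
 *	parent = binder_validate_ptr(buffer, fda->parent, off_start,
 *				     offp - off_start);
 *	if (!parent)
 *		return -EINVAL;	// bad parent offset or type
 */
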
1909/**
1910 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1911 * @b: transaction buffer
1912 * @objects_start: start of objects buffer
1913 * @buffer: binder_buffer_object in which to fix up
1914 * @fixup_offset: start offset in @buffer to fix up
1915 * @last_obj: last binder_buffer_object that we fixed up in
1916 * @last_min_offset: minimum fixup offset in @last_obj
1917 *
1918 * Return: %true if a fixup in buffer @buffer at offset @offset is
1919 * allowed.
1920 *
1921 * For safety reasons, we only allow fixups inside a buffer to happen
1922 * at increasing offsets; additionally, we only allow fixup on the last
1923 * buffer object that was verified, or one of its parents.
1924 *
1925 * Example of what is allowed:
1926 *
1927 * A
1928 * B (parent = A, offset = 0)
1929 * C (parent = A, offset = 16)
1930 * D (parent = C, offset = 0)
1931 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1932 *
1933 * Examples of what is not allowed:
1934 *
1935 * Decreasing offsets within the same parent:
1936 * A
1937 * C (parent = A, offset = 16)
1938 * B (parent = A, offset = 0) // decreasing offset within A
1939 *
1940 * Referring to a parent that wasn't the last object or any of its parents:
1941 * A
1942 * B (parent = A, offset = 0)
1943 * C (parent = A, offset = 0)
1944 * C (parent = A, offset = 16)
1945 * D (parent = B, offset = 0) // B is not A or any of A's parents
1946 */
1947static bool binder_validate_fixup(struct binder_buffer *b,
1948 binder_size_t *objects_start,
1949 struct binder_buffer_object *buffer,
1950 binder_size_t fixup_offset,
1951 struct binder_buffer_object *last_obj,
1952 binder_size_t last_min_offset)
1953{
1954 if (!last_obj) {
1955 /* No object to fix up in */
1956 return false;
1957 }
1958
1959 while (last_obj != buffer) {
1960 /*
1961 * Safe to retrieve the parent of last_obj, since it
1962 * was already previously verified by the driver.
1963 */
1964 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1965 return false;
1966 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1967 last_obj = (struct binder_buffer_object *)
1968 (b->data + *(objects_start + last_obj->parent));
1969 }
1970 return (fixup_offset >= last_min_offset);
1971}
1972
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001973static void binder_transaction_buffer_release(struct binder_proc *proc,
1974 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001975 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001976{
Martijn Coenen5a6da532016-09-30 14:10:07 +02001977 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001978 int debug_id = buffer->debug_id;
1979
1980 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301981 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001982 proc->pid, buffer->debug_id,
1983 buffer->data_size, buffer->offsets_size, failed_at);
1984
1985 if (buffer->target_node)
1986 binder_dec_node(buffer->target_node, 1, 0);
1987
Martijn Coenen5a6da532016-09-30 14:10:07 +02001988 off_start = (binder_size_t *)(buffer->data +
1989 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001990 if (failed_at)
1991 off_end = failed_at;
1992 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02001993 off_end = (void *)off_start + buffer->offsets_size;
1994 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02001995 struct binder_object_header *hdr;
1996 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09001997
Martijn Coenen00c80372016-07-13 12:06:49 +02001998 if (object_size == 0) {
1999 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002000 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002001 continue;
2002 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002003 hdr = (struct binder_object_header *)(buffer->data + *offp);
2004 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002005 case BINDER_TYPE_BINDER:
2006 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002007 struct flat_binder_object *fp;
2008 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002009
Martijn Coenen00c80372016-07-13 12:06:49 +02002010 fp = to_flat_binder_object(hdr);
2011 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002012 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002013 pr_err("transaction release %d bad node %016llx\n",
2014 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002015 break;
2016 }
2017 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002018 " node %d u%016llx\n",
2019 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002020 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2021 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002022 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002023 } break;
2024 case BINDER_TYPE_HANDLE:
2025 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002026 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002027 struct binder_ref_data rdata;
2028 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002029
Martijn Coenen00c80372016-07-13 12:06:49 +02002030 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002031 ret = binder_dec_ref_for_handle(proc, fp->handle,
2032 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2033
2034 if (ret) {
2035 pr_err("transaction release %d bad handle %d, ret = %d\n",
2036 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002037 break;
2038 }
2039 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002040 " ref %d desc %d\n",
2041 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002042 } break;
2043
Martijn Coenen00c80372016-07-13 12:06:49 +02002044 case BINDER_TYPE_FD: {
2045 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2046
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002047 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002048 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002049 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002050 task_close_fd(proc, fp->fd);
2051 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002052 case BINDER_TYPE_PTR:
2053 /*
2054 * Nothing to do here, this will get cleaned up when the
2055 * transaction buffer gets freed
2056 */
2057 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002058 case BINDER_TYPE_FDA: {
2059 struct binder_fd_array_object *fda;
2060 struct binder_buffer_object *parent;
2061 uintptr_t parent_buffer;
2062 u32 *fd_array;
2063 size_t fd_index;
2064 binder_size_t fd_buf_size;
2065
2066 fda = to_binder_fd_array_object(hdr);
2067 parent = binder_validate_ptr(buffer, fda->parent,
2068 off_start,
2069 offp - off_start);
2070 if (!parent) {
2071 pr_err("transaction release %d bad parent offset",
2072 debug_id);
2073 continue;
2074 }
2075 /*
2076 * Since the parent was already fixed up, convert it
2077 * back to kernel address space to access it
2078 */
2079 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002080 binder_alloc_get_user_buffer_offset(
2081 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002082
2083 fd_buf_size = sizeof(u32) * fda->num_fds;
2084 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2085 pr_err("transaction release %d invalid number of fds (%lld)\n",
2086 debug_id, (u64)fda->num_fds);
2087 continue;
2088 }
2089 if (fd_buf_size > parent->length ||
2090 fda->parent_offset > parent->length - fd_buf_size) {
2091 /* No space for all file descriptors here. */
2092 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2093 debug_id, (u64)fda->num_fds);
2094 continue;
2095 }
2096 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2097 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2098 task_close_fd(proc, fd_array[fd_index]);
2099 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002100 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002101 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002102 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002103 break;
2104 }
2105 }
2106}
2107
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002108static int binder_translate_binder(struct flat_binder_object *fp,
2109 struct binder_transaction *t,
2110 struct binder_thread *thread)
2111{
2112 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002113 struct binder_proc *proc = thread->proc;
2114 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002115 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002116 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002117
2118 node = binder_get_node(proc, fp->binder);
2119 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002120 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002121 if (!node)
2122 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002123 }
2124 if (fp->cookie != node->cookie) {
2125 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2126 proc->pid, thread->pid, (u64)fp->binder,
2127 node->debug_id, (u64)fp->cookie,
2128 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002129 ret = -EINVAL;
2130 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002131 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002132 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2133 ret = -EPERM;
2134 goto done;
2135 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002136
Todd Kjosb0117bb2017-05-08 09:16:27 -07002137 ret = binder_inc_ref_for_node(target_proc, node,
2138 fp->hdr.type == BINDER_TYPE_BINDER,
2139 &thread->todo, &rdata);
2140 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002141 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002142
2143 if (fp->hdr.type == BINDER_TYPE_BINDER)
2144 fp->hdr.type = BINDER_TYPE_HANDLE;
2145 else
2146 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2147 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002148 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002149 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002150
Todd Kjosb0117bb2017-05-08 09:16:27 -07002151 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002152 binder_debug(BINDER_DEBUG_TRANSACTION,
2153 " node %d u%016llx -> ref %d desc %d\n",
2154 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002155 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002156done:
2157 binder_put_node(node);
2158 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002159}
2160
2161static int binder_translate_handle(struct flat_binder_object *fp,
2162 struct binder_transaction *t,
2163 struct binder_thread *thread)
2164{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002165 struct binder_proc *proc = thread->proc;
2166 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002167 struct binder_node *node;
2168 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002169 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002170
Todd Kjosb0117bb2017-05-08 09:16:27 -07002171 node = binder_get_node_from_ref(proc, fp->handle,
2172 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2173 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002174 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2175 proc->pid, thread->pid, fp->handle);
2176 return -EINVAL;
2177 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002178 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2179 ret = -EPERM;
2180 goto done;
2181 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002182
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002183 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002184 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002185 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2186 fp->hdr.type = BINDER_TYPE_BINDER;
2187 else
2188 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002189 fp->binder = node->ptr;
2190 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002191 if (node->proc)
2192 binder_inner_proc_lock(node->proc);
2193 binder_inc_node_nilocked(node,
2194 fp->hdr.type == BINDER_TYPE_BINDER,
2195 0, NULL);
2196 if (node->proc)
2197 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002198 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002199 binder_debug(BINDER_DEBUG_TRANSACTION,
2200 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002201 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2202 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002203 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002204 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002205 int ret;
2206 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002207
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002208 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002209 ret = binder_inc_ref_for_node(target_proc, node,
2210 fp->hdr.type == BINDER_TYPE_HANDLE,
2211 NULL, &dest_rdata);
2212 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002213 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002214
2215 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002216 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002217 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002218 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2219 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002220 binder_debug(BINDER_DEBUG_TRANSACTION,
2221 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002222 src_rdata.debug_id, src_rdata.desc,
2223 dest_rdata.debug_id, dest_rdata.desc,
2224 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002225 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002226done:
2227 binder_put_node(node);
2228 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002229}
2230
2231static int binder_translate_fd(int fd,
2232 struct binder_transaction *t,
2233 struct binder_thread *thread,
2234 struct binder_transaction *in_reply_to)
2235{
2236 struct binder_proc *proc = thread->proc;
2237 struct binder_proc *target_proc = t->to_proc;
2238 int target_fd;
2239 struct file *file;
2240 int ret;
2241 bool target_allows_fd;
2242
2243 if (in_reply_to)
2244 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2245 else
2246 target_allows_fd = t->buffer->target_node->accept_fds;
2247 if (!target_allows_fd) {
2248 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2249 proc->pid, thread->pid,
2250 in_reply_to ? "reply" : "transaction",
2251 fd);
2252 ret = -EPERM;
2253 goto err_fd_not_accepted;
2254 }
2255
2256 file = fget(fd);
2257 if (!file) {
2258 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2259 proc->pid, thread->pid, fd);
2260 ret = -EBADF;
2261 goto err_fget;
2262 }
2263 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2264 if (ret < 0) {
2265 ret = -EPERM;
2266 goto err_security;
2267 }
2268
2269 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2270 if (target_fd < 0) {
2271 ret = -ENOMEM;
2272 goto err_get_unused_fd;
2273 }
2274 task_fd_install(target_proc, target_fd, file);
2275 trace_binder_transaction_fd(t, fd, target_fd);
2276 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2277 fd, target_fd);
2278
2279 return target_fd;
2280
2281err_get_unused_fd:
2282err_security:
2283 fput(file);
2284err_fget:
2285err_fd_not_accepted:
2286 return ret;
2287}
2288
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002289static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2290 struct binder_buffer_object *parent,
2291 struct binder_transaction *t,
2292 struct binder_thread *thread,
2293 struct binder_transaction *in_reply_to)
2294{
2295 binder_size_t fdi, fd_buf_size, num_installed_fds;
2296 int target_fd;
2297 uintptr_t parent_buffer;
2298 u32 *fd_array;
2299 struct binder_proc *proc = thread->proc;
2300 struct binder_proc *target_proc = t->to_proc;
2301
2302 fd_buf_size = sizeof(u32) * fda->num_fds;
2303 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2304 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2305 proc->pid, thread->pid, (u64)fda->num_fds);
2306 return -EINVAL;
2307 }
2308 if (fd_buf_size > parent->length ||
2309 fda->parent_offset > parent->length - fd_buf_size) {
2310 /* No space for all file descriptors here. */
2311 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2312 proc->pid, thread->pid, (u64)fda->num_fds);
2313 return -EINVAL;
2314 }
2315 /*
2316 * Since the parent was already fixed up, convert it
2317 * back to the kernel address space to access it
2318 */
Todd Kjosd325d372016-10-10 10:40:53 -07002319 parent_buffer = parent->buffer -
2320 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002321 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2322 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2323 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2324 proc->pid, thread->pid);
2325 return -EINVAL;
2326 }
2327 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2328 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2329 in_reply_to);
2330 if (target_fd < 0)
2331 goto err_translate_fd_failed;
2332 fd_array[fdi] = target_fd;
2333 }
2334 return 0;
2335
2336err_translate_fd_failed:
2337 /*
2338 * Failed to allocate fd or security error, free fds
2339 * installed so far.
2340 */
2341 num_installed_fds = fdi;
2342 for (fdi = 0; fdi < num_installed_fds; fdi++)
2343 task_close_fd(target_proc, fd_array[fdi]);
2344 return target_fd;
2345}
2346
Martijn Coenen5a6da532016-09-30 14:10:07 +02002347static int binder_fixup_parent(struct binder_transaction *t,
2348 struct binder_thread *thread,
2349 struct binder_buffer_object *bp,
2350 binder_size_t *off_start,
2351 binder_size_t num_valid,
2352 struct binder_buffer_object *last_fixup_obj,
2353 binder_size_t last_fixup_min_off)
2354{
2355 struct binder_buffer_object *parent;
2356 u8 *parent_buffer;
2357 struct binder_buffer *b = t->buffer;
2358 struct binder_proc *proc = thread->proc;
2359 struct binder_proc *target_proc = t->to_proc;
2360
2361 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2362 return 0;
2363
2364 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2365 if (!parent) {
2366 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2367 proc->pid, thread->pid);
2368 return -EINVAL;
2369 }
2370
2371 if (!binder_validate_fixup(b, off_start,
2372 parent, bp->parent_offset,
2373 last_fixup_obj,
2374 last_fixup_min_off)) {
2375 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2376 proc->pid, thread->pid);
2377 return -EINVAL;
2378 }
2379
2380 if (parent->length < sizeof(binder_uintptr_t) ||
2381 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2382 /* No space for a pointer here! */
2383 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2384 proc->pid, thread->pid);
2385 return -EINVAL;
2386 }
2387 parent_buffer = (u8 *)(parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002388 binder_alloc_get_user_buffer_offset(
2389 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002390 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2391
2392 return 0;
2393}
2394
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002395static void binder_transaction(struct binder_proc *proc,
2396 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002397 struct binder_transaction_data *tr, int reply,
2398 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002399{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002400 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002401 struct binder_transaction *t;
2402 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002403 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002404 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002405 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002406 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002407 struct binder_thread *target_thread = NULL;
2408 struct binder_node *target_node = NULL;
2409 struct list_head *target_list;
2410 wait_queue_head_t *target_wait;
2411 struct binder_transaction *in_reply_to = NULL;
2412 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002413 uint32_t return_error = 0;
2414 uint32_t return_error_param = 0;
2415 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002416 struct binder_buffer_object *last_fixup_obj = NULL;
2417 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002418 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002419 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002420
2421 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002422 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002423 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2424 e->from_proc = proc->pid;
2425 e->from_thread = thread->pid;
2426 e->target_handle = tr->target.handle;
2427 e->data_size = tr->data_size;
2428 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002429 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002430
2431 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002432 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002433 in_reply_to = thread->transaction_stack;
2434 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002435 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302436 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002437 proc->pid, thread->pid);
2438 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002439 return_error_param = -EPROTO;
2440 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002441 goto err_empty_call_stack;
2442 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002443 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002444 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302445 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002446 proc->pid, thread->pid, in_reply_to->debug_id,
2447 in_reply_to->to_proc ?
2448 in_reply_to->to_proc->pid : 0,
2449 in_reply_to->to_thread ?
2450 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002451 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002452 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002453 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002454 return_error_param = -EPROTO;
2455 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002456 in_reply_to = NULL;
2457 goto err_bad_call_stack;
2458 }
2459 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002460 binder_inner_proc_unlock(proc);
2461 binder_set_nice(in_reply_to->saved_priority);
2462 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002463 if (target_thread == NULL) {
2464 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002465 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002466 goto err_dead_binder;
2467 }
2468 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302469 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002470 proc->pid, thread->pid,
2471 target_thread->transaction_stack ?
2472 target_thread->transaction_stack->debug_id : 0,
2473 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002474 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002475 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002476 return_error_param = -EPROTO;
2477 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002478 in_reply_to = NULL;
2479 target_thread = NULL;
2480 goto err_dead_binder;
2481 }
2482 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002483 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002484 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002485 } else {
2486 if (tr->target.handle) {
2487 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002488
Todd Kjosc37162d2017-05-26 11:56:29 -07002489 /*
2490 * There must already be a strong ref
2491 * on this node. If so, do a strong
2492 * increment on the node to ensure it
2493 * stays alive until the transaction is
2494 * done.
2495 */
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002496 ref = binder_get_ref(proc, tr->target.handle, true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002497 if (ref) {
2498 binder_inc_node(ref->node, 1, 0, NULL);
2499 target_node = ref->node;
2500 }
2501 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302502 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002503 proc->pid, thread->pid);
2504 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002505 return_error_param = -EINVAL;
2506 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002507 goto err_invalid_target_handle;
2508 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002509 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002510 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002511 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002512 if (target_node == NULL) {
2513 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002514 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjose598d172017-03-22 17:19:52 -07002515 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002516 goto err_no_context_mgr_node;
2517 }
Todd Kjosc37162d2017-05-26 11:56:29 -07002518 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002519 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002520 }
2521 e->to_node = target_node->debug_id;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002522 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002523 target_proc = target_node->proc;
2524 if (target_proc == NULL) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002525 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002526 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002527 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002528 goto err_dead_binder;
2529 }
Todd Kjosb4827902017-05-25 15:52:17 -07002530 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002531 target_proc->tmp_ref++;
Todd Kjosb4827902017-05-25 15:52:17 -07002532 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002533 binder_node_unlock(target_node);
Stephen Smalley79af7302015-01-21 10:54:10 -05002534 if (security_binder_transaction(proc->tsk,
2535 target_proc->tsk) < 0) {
2536 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002537 return_error_param = -EPERM;
2538 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05002539 goto err_invalid_target_handle;
2540 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002541 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002542 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2543 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002544
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002545 tmp = thread->transaction_stack;
2546 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002547 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302548 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002549 proc->pid, thread->pid, tmp->debug_id,
2550 tmp->to_proc ? tmp->to_proc->pid : 0,
2551 tmp->to_thread ?
2552 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002553 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002554 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002555 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002556 return_error_param = -EPROTO;
2557 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002558 goto err_bad_call_stack;
2559 }
2560 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002561 struct binder_thread *from;
2562
2563 spin_lock(&tmp->lock);
2564 from = tmp->from;
2565 if (from && from->proc == target_proc) {
2566 atomic_inc(&from->tmp_ref);
2567 target_thread = from;
2568 spin_unlock(&tmp->lock);
2569 break;
2570 }
2571 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002572 tmp = tmp->from_parent;
2573 }
2574 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002575 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002576 }
2577 if (target_thread) {
2578 e->to_thread = target_thread->pid;
2579 target_list = &target_thread->todo;
2580 target_wait = &target_thread->wait;
2581 } else {
2582 target_list = &target_proc->todo;
2583 target_wait = &target_proc->wait;
2584 }
2585 e->to_proc = target_proc->pid;
2586
2587 /* TODO: reuse incoming transaction for reply */
2588 t = kzalloc(sizeof(*t), GFP_KERNEL);
2589 if (t == NULL) {
2590 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002591 return_error_param = -ENOMEM;
2592 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002593 goto err_alloc_t_failed;
2594 }
2595 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07002596 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002597
2598 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2599 if (tcomplete == NULL) {
2600 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002601 return_error_param = -ENOMEM;
2602 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002603 goto err_alloc_tcomplete_failed;
2604 }
2605 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2606
Todd Kjos1cfe6272017-05-24 13:33:28 -07002607 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002608
2609 if (reply)
2610 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002611 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002612 proc->pid, thread->pid, t->debug_id,
2613 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002614 (u64)tr->data.ptr.buffer,
2615 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002616 (u64)tr->data_size, (u64)tr->offsets_size,
2617 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002618 else
2619 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002620 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002621 proc->pid, thread->pid, t->debug_id,
2622 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002623 (u64)tr->data.ptr.buffer,
2624 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002625 (u64)tr->data_size, (u64)tr->offsets_size,
2626 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002627
2628 if (!reply && !(tr->flags & TF_ONE_WAY))
2629 t->from = thread;
2630 else
2631 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03002632 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002633 t->to_proc = target_proc;
2634 t->to_thread = target_thread;
2635 t->code = tr->code;
2636 t->flags = tr->flags;
2637 t->priority = task_nice(current);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002638
2639 trace_binder_transaction(reply, t, target_node);
2640
Todd Kjosd325d372016-10-10 10:40:53 -07002641 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02002642 tr->offsets_size, extra_buffers_size,
2643 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07002644 if (IS_ERR(t->buffer)) {
2645 /*
2646 * -ESRCH indicates VMA cleared. The target is dying.
2647 */
2648 return_error_param = PTR_ERR(t->buffer);
2649 return_error = return_error_param == -ESRCH ?
2650 BR_DEAD_REPLY : BR_FAILED_REPLY;
2651 return_error_line = __LINE__;
2652 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002653 goto err_binder_alloc_buf_failed;
2654 }
2655 t->buffer->allow_user_free = 0;
2656 t->buffer->debug_id = t->debug_id;
2657 t->buffer->transaction = t;
2658 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002659 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002660 off_start = (binder_size_t *)(t->buffer->data +
2661 ALIGN(tr->data_size, sizeof(void *)));
2662 offp = off_start;
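/*
 * Layout of the allocated buffer: the raw data area first, then the
 * offsets array aligned to sizeof(void *), then the scatter-gather
 * area, with each chunk padded out to a sizeof(u64) boundary.
 */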
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002663
Arve Hjønnevågda498892014-02-21 14:40:26 -08002664 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2665 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302666 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2667 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002668 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002669 return_error_param = -EFAULT;
2670 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002671 goto err_copy_data_failed;
2672 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002673 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2674 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302675 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2676 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002677 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002678 return_error_param = -EFAULT;
2679 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002680 goto err_copy_data_failed;
2681 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002682 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2683 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2684 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002685 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002686 return_error_param = -EINVAL;
2687 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002688 goto err_bad_offset;
2689 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02002690 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2691 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2692 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05302693 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002694 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002695 return_error_param = -EINVAL;
2696 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002697 goto err_bad_offset;
2698 }
2699 off_end = (void *)off_start + tr->offsets_size;
2700 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2701 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002702 off_min = 0;
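/*
 * Validate and translate every object embedded in the payload.
 * off_min advances past each accepted object, which forces the
 * offsets array to be sorted and the objects to be non-overlapping.
 */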
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002703 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002704 struct binder_object_header *hdr;
2705 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002706
Martijn Coenen00c80372016-07-13 12:06:49 +02002707 if (object_size == 0 || *offp < off_min) {
2708 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002709 proc->pid, thread->pid, (u64)*offp,
2710 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02002711 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002712 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002713 return_error_param = -EINVAL;
2714 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002715 goto err_bad_offset;
2716 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002717
2718 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2719 off_min = *offp + object_size;
2720 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002721 case BINDER_TYPE_BINDER:
2722 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002723 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002724
Martijn Coenen00c80372016-07-13 12:06:49 +02002725 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002726 ret = binder_translate_binder(fp, t, thread);
2727 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02002728 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002729 return_error_param = ret;
2730 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002731 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002732 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002733 } break;
2734 case BINDER_TYPE_HANDLE:
2735 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002736 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002737
Martijn Coenen00c80372016-07-13 12:06:49 +02002738 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002739 ret = binder_translate_handle(fp, t, thread);
2740 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002741 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002742 return_error_param = ret;
2743 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002744 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002745 }
2746 } break;
2747
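/*
 * A file descriptor is only meaningful in the sender's fd table;
 * binder_translate_fd() installs an equivalent descriptor in the
 * target and the object is patched in place below.
 */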
2748 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002749 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002750 int target_fd = binder_translate_fd(fp->fd, t, thread,
2751 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002752
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002753 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002754 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002755 return_error_param = target_fd;
2756 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002757 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002758 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002759 fp->pad_binder = 0;
2760 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002761 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002762 case BINDER_TYPE_FDA: {
2763 struct binder_fd_array_object *fda =
2764 to_binder_fd_array_object(hdr);
2765 struct binder_buffer_object *parent =
2766 binder_validate_ptr(t->buffer, fda->parent,
2767 off_start,
2768 offp - off_start);
2769 if (!parent) {
2770 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2771 proc->pid, thread->pid);
2772 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002773 return_error_param = -EINVAL;
2774 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002775 goto err_bad_parent;
2776 }
2777 if (!binder_validate_fixup(t->buffer, off_start,
2778 parent, fda->parent_offset,
2779 last_fixup_obj,
2780 last_fixup_min_off)) {
2781 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2782 proc->pid, thread->pid);
2783 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002784 return_error_param = -EINVAL;
2785 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002786 goto err_bad_parent;
2787 }
2788 ret = binder_translate_fd_array(fda, parent, t, thread,
2789 in_reply_to);
2790 if (ret < 0) {
2791 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002792 return_error_param = ret;
2793 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002794 goto err_translate_failed;
2795 }
2796 last_fixup_obj = parent;
2797 last_fixup_min_off =
2798 fda->parent_offset + sizeof(u32) * fda->num_fds;
2799 } break;
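/*
 * BINDER_TYPE_PTR objects carry an out-of-line payload: the bytes are
 * copied into the scatter-gather area and bp->buffer is rewritten
 * below to the address the target process will see.
 */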
Martijn Coenen5a6da532016-09-30 14:10:07 +02002800 case BINDER_TYPE_PTR: {
2801 struct binder_buffer_object *bp =
2802 to_binder_buffer_object(hdr);
2803 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002804
Martijn Coenen5a6da532016-09-30 14:10:07 +02002805 if (bp->length > buf_left) {
2806 binder_user_error("%d:%d got transaction with too large buffer\n",
2807 proc->pid, thread->pid);
2808 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002809 return_error_param = -EINVAL;
2810 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002811 goto err_bad_offset;
2812 }
2813 if (copy_from_user(sg_bufp,
2814 (const void __user *)(uintptr_t)
2815 bp->buffer, bp->length)) {
2816 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2817 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07002818 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002819 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002820 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002821 goto err_copy_data_failed;
2822 }
2823 /* Fixup buffer pointer to target proc address space */
2824 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07002825 binder_alloc_get_user_buffer_offset(
2826 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002827 sg_bufp += ALIGN(bp->length, sizeof(u64));
2828
2829 ret = binder_fixup_parent(t, thread, bp, off_start,
2830 offp - off_start,
2831 last_fixup_obj,
2832 last_fixup_min_off);
2833 if (ret < 0) {
2834 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002835 return_error_param = ret;
2836 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002837 goto err_translate_failed;
2838 }
2839 last_fixup_obj = bp;
2840 last_fixup_min_off = 0;
2841 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002842 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002843 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002844 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002845 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002846 return_error_param = -EINVAL;
2847 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002848 goto err_bad_object_type;
2849 }
2850 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002851 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002852 binder_enqueue_work(proc, tcomplete, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002853 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002854
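/*
 * Three delivery paths follow: a reply pops the original call off the
 * caller's transaction stack; a synchronous call is first pushed onto
 * the sender's stack so the reply can find its way back; a one-way
 * call is serialized per-node through async_todo.
 */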
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002855 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002856 binder_inner_proc_lock(target_proc);
2857 if (target_thread->is_dead) {
2858 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002859 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002860 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002861 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002862 binder_pop_transaction_ilocked(target_thread, in_reply_to);
2863 binder_enqueue_work_ilocked(&t->work, target_list);
2864 binder_inner_proc_unlock(target_proc);
Todd Kjos21ef40a2017-03-30 18:02:13 -07002865 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002866 } else if (!(t->flags & TF_ONE_WAY)) {
2867 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002868 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002869 t->need_reply = 1;
2870 t->from_parent = thread->transaction_stack;
2871 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002872 binder_inner_proc_unlock(proc);
2873 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002874 if (target_proc->is_dead ||
2875 (target_thread && target_thread->is_dead)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002876 binder_inner_proc_unlock(target_proc);
2877 binder_inner_proc_lock(proc);
2878 binder_pop_transaction_ilocked(thread, t);
2879 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002880 goto err_dead_proc_or_thread;
2881 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002882 binder_enqueue_work_ilocked(&t->work, target_list);
2883 binder_inner_proc_unlock(target_proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002884 } else {
2885 BUG_ON(target_node == NULL);
2886 BUG_ON(t->buffer->async_transaction != 1);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002887 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002888 if (target_node->has_async_transaction) {
2889 target_list = &target_node->async_todo;
2890 target_wait = NULL;
2891 } else
2892 target_node->has_async_transaction = 1;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002893 /*
2894 * Test/set of has_async_transaction
2895 * must be atomic with enqueue on
2896 * async_todo
2897 */
Martijn Coenen995a36e2017-06-02 13:36:52 -07002898 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002899 if (target_proc->is_dead ||
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002900 (target_thread && target_thread->is_dead)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002901 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002902 binder_node_unlock(target_node);
Todd Kjos2f993e22017-05-12 14:42:55 -07002903 goto err_dead_proc_or_thread;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002904 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002905 binder_enqueue_work_ilocked(&t->work, target_list);
2906 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002907 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002908 }
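/*
 * The _sync wakeup is used when the waker is expected to block
 * shortly (a synchronous caller waits for its reply), hinting that
 * the scheduler may hand off to the target directly.
 */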
Riley Andrewsb5968812015-09-01 12:42:07 -07002909 if (target_wait) {
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002910 if (reply || !(tr->flags & TF_ONE_WAY))
Riley Andrewsb5968812015-09-01 12:42:07 -07002911 wake_up_interruptible_sync(target_wait);
2912 else
2913 wake_up_interruptible(target_wait);
2914 }
Todd Kjos2f993e22017-05-12 14:42:55 -07002915 if (target_thread)
2916 binder_thread_dec_tmpref(target_thread);
2917 binder_proc_dec_tmpref(target_proc);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002918 /*
2919 * write barrier to synchronize with initialization
2920 * of log entry
2921 */
2922 smp_wmb();
2923 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002924 return;
2925
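/*
 * Error unwinding: the labels below run in reverse order of setup, so
 * each path releases exactly what had been acquired by the time the
 * corresponding failure could occur.
 */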
Todd Kjos2f993e22017-05-12 14:42:55 -07002926err_dead_proc_or_thread:
2927 return_error = BR_DEAD_REPLY;
2928 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002929err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002930err_bad_object_type:
2931err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002932err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002933err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002934 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002935 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjosc37162d2017-05-26 11:56:29 -07002936 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002937 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07002938 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002939err_binder_alloc_buf_failed:
2940 kfree(tcomplete);
2941 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2942err_alloc_tcomplete_failed:
2943 kfree(t);
2944 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2945err_alloc_t_failed:
2946err_bad_call_stack:
2947err_empty_call_stack:
2948err_dead_binder:
2949err_invalid_target_handle:
2950err_no_context_mgr_node:
Todd Kjos2f993e22017-05-12 14:42:55 -07002951 if (target_thread)
2952 binder_thread_dec_tmpref(target_thread);
2953 if (target_proc)
2954 binder_proc_dec_tmpref(target_proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002955 if (target_node)
2956 binder_dec_node(target_node, 1, 0);
2957
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002958 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07002959 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2960 proc->pid, thread->pid, return_error, return_error_param,
2961 (u64)tr->data_size, (u64)tr->offsets_size,
2962 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002963
2964 {
2965 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09002966
Todd Kjose598d172017-03-22 17:19:52 -07002967 e->return_error = return_error;
2968 e->return_error_param = return_error_param;
2969 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002970 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2971 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002972 /*
2973 * write barrier to synchronize with initialization
2974 * of log entry
2975 */
2976 smp_wmb();
2977 WRITE_ONCE(e->debug_id_done, t_debug_id);
2978 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002979 }
2980
Todd Kjos858b8da2017-04-21 17:35:12 -07002981 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002982 if (in_reply_to) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002983 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002984 binder_enqueue_work(thread->proc,
2985 &thread->return_error.work,
2986 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002987 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07002988 } else {
2989 thread->return_error.cmd = return_error;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002990 binder_enqueue_work(thread->proc,
2991 &thread->return_error.work,
2992 &thread->todo);
Todd Kjos858b8da2017-04-21 17:35:12 -07002993 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002994}
2995
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02002996static int binder_thread_write(struct binder_proc *proc,
2997 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002998 binder_uintptr_t binder_buffer, size_t size,
2999 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003000{
3001 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003002 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003003 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003004 void __user *ptr = buffer + *consumed;
3005 void __user *end = buffer + size;
3006
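/*
 * Each loop iteration consumes one BC_* command (plus its payload)
 * from the write buffer handed in via the BINDER_WRITE_READ ioctl.
 * A minimal, hypothetical userspace sketch of driving this loop:
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * binder_fd and the single-command buffer are illustrative
 * assumptions; real callers batch many commands per ioctl.
 */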
Todd Kjos858b8da2017-04-21 17:35:12 -07003007 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003008 int ret;
3009
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003010 if (get_user(cmd, (uint32_t __user *)ptr))
3011 return -EFAULT;
3012 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003013 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003014 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003015 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3016 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3017 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003018 }
3019 switch (cmd) {
3020 case BC_INCREFS:
3021 case BC_ACQUIRE:
3022 case BC_RELEASE:
3023 case BC_DECREFS: {
3024 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003025 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003026 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3027 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3028 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003029
3030 if (get_user(target, (uint32_t __user *)ptr))
3031 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003032
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003033 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003034 ret = -1;
3035 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003036 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003037 mutex_lock(&context->context_mgr_node_lock);
3038 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003039 if (ctx_mgr_node)
3040 ret = binder_inc_ref_for_node(
3041 proc, ctx_mgr_node,
3042 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003043 mutex_unlock(&context->context_mgr_node_lock);
3044 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003045 if (ret)
3046 ret = binder_update_ref_for_handle(
3047 proc, target, increment, strong,
3048 &rdata);
3049 if (!ret && rdata.desc != target) {
3050 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3051 proc->pid, thread->pid,
3052 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003053 }
3054 switch (cmd) {
3055 case BC_INCREFS:
3056 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003057 break;
3058 case BC_ACQUIRE:
3059 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003060 break;
3061 case BC_RELEASE:
3062 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003063 break;
3064 case BC_DECREFS:
3065 default:
3066 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003067 break;
3068 }
3069 if (ret) {
3070 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3071 proc->pid, thread->pid, debug_string,
3072 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003073 break;
3074 }
3075 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003076 "%d:%d %s ref %d desc %d s %d w %d\n",
3077 proc->pid, thread->pid, debug_string,
3078 rdata.debug_id, rdata.desc, rdata.strong,
3079 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003080 break;
3081 }
3082 case BC_INCREFS_DONE:
3083 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003084 binder_uintptr_t node_ptr;
3085 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003086 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003087 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003088
Arve Hjønnevågda498892014-02-21 14:40:26 -08003089 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003090 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003091 ptr += sizeof(binder_uintptr_t);
3092 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003093 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003094 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003095 node = binder_get_node(proc, node_ptr);
3096 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003097 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003098 proc->pid, thread->pid,
3099 cmd == BC_INCREFS_DONE ?
3100 "BC_INCREFS_DONE" :
3101 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003102 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003103 break;
3104 }
3105 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003106 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003107 proc->pid, thread->pid,
3108 cmd == BC_INCREFS_DONE ?
3109 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003110 (u64)node_ptr, node->debug_id,
3111 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003112 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003113 break;
3114 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003115 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003116 if (cmd == BC_ACQUIRE_DONE) {
3117 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303118 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003119 proc->pid, thread->pid,
3120 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003121 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003122 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003123 break;
3124 }
3125 node->pending_strong_ref = 0;
3126 } else {
3127 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303128 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003129 proc->pid, thread->pid,
3130 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003131 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003132 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003133 break;
3134 }
3135 node->pending_weak_ref = 0;
3136 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003137 free_node = binder_dec_node_nilocked(node,
3138 cmd == BC_ACQUIRE_DONE, 0);
3139 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003140 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003141 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003142 proc->pid, thread->pid,
3143 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003144 node->debug_id, node->local_strong_refs,
3145 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003146 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003147 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003148 break;
3149 }
3150 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303151 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003152 return -EINVAL;
3153 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303154 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003155 return -EINVAL;
3156
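/*
 * BC_FREE_BUFFER returns a kernel-allocated buffer once userspace has
 * consumed the transaction it carried; for async transactions it also
 * releases the next queued async work on the same node, preserving
 * per-node ordering of one-way calls.
 */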
3157 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003158 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003159 struct binder_buffer *buffer;
3160
Arve Hjønnevågda498892014-02-21 14:40:26 -08003161 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003162 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003163 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003164
Todd Kjos076072a2017-04-21 14:32:11 -07003165 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3166 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003167 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003168 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3169 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003170 break;
3171 }
3172 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003173 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3174 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003175 break;
3176 }
3177 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003178 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3179 proc->pid, thread->pid, (u64)data_ptr,
3180 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003181 buffer->transaction ? "active" : "finished");
3182
3183 if (buffer->transaction) {
3184 buffer->transaction->buffer = NULL;
3185 buffer->transaction = NULL;
3186 }
3187 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003188 struct binder_node *buf_node;
3189 struct binder_work *w;
3190
3191 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003192 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003193 BUG_ON(!buf_node->has_async_transaction);
3194 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003195 w = binder_dequeue_work_head_ilocked(
3196 &buf_node->async_todo);
3197 if (!w)
3198 buf_node->has_async_transaction = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003199 else
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003200 binder_enqueue_work_ilocked(
3201 w, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003202 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003203 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003204 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003205 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003206 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003207 break;
3208 }
3209
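/*
 * All four transaction commands funnel into binder_transaction(); the
 * _SG variants append a buffers_size so that BINDER_TYPE_PTR payloads
 * can be carried out-of-line.
 */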
Martijn Coenen5a6da532016-09-30 14:10:07 +02003210 case BC_TRANSACTION_SG:
3211 case BC_REPLY_SG: {
3212 struct binder_transaction_data_sg tr;
3213
3214 if (copy_from_user(&tr, ptr, sizeof(tr)))
3215 return -EFAULT;
3216 ptr += sizeof(tr);
3217 binder_transaction(proc, thread, &tr.transaction_data,
3218 cmd == BC_REPLY_SG, tr.buffers_size);
3219 break;
3220 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003221 case BC_TRANSACTION:
3222 case BC_REPLY: {
3223 struct binder_transaction_data tr;
3224
3225 if (copy_from_user(&tr, ptr, sizeof(tr)))
3226 return -EFAULT;
3227 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003228 binder_transaction(proc, thread, &tr,
3229 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003230 break;
3231 }
3232
3233 case BC_REGISTER_LOOPER:
3234 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303235 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003236 proc->pid, thread->pid);
3237 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3238 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303239 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003240 proc->pid, thread->pid);
3241 } else if (proc->requested_threads == 0) {
3242 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303243 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003244 proc->pid, thread->pid);
3245 } else {
3246 proc->requested_threads--;
3247 proc->requested_threads_started++;
3248 }
3249 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3250 break;
3251 case BC_ENTER_LOOPER:
3252 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303253 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003254 proc->pid, thread->pid);
3255 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3256 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303257 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003258 proc->pid, thread->pid);
3259 }
3260 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3261 break;
3262 case BC_EXIT_LOOPER:
3263 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303264 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003265 proc->pid, thread->pid);
3266 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3267 break;
3268
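/*
 * Death notifications: userspace registers a cookie against a remote
 * node (the command is followed in the stream by a uint32_t handle
 * and a binder_uintptr_t cookie, read below); when the node's process
 * dies, or is already dead here, BINDER_WORK_DEAD_BINDER is queued
 * and later delivered as BR_DEAD_BINDER with that cookie.
 */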
3269 case BC_REQUEST_DEATH_NOTIFICATION:
3270 case BC_CLEAR_DEATH_NOTIFICATION: {
3271 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003272 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003273 struct binder_ref *ref;
3274 struct binder_ref_death *death;
3275
3276 if (get_user(target, (uint32_t __user *)ptr))
3277 return -EFAULT;
3278 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003279 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003280 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003281 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003282 ref = binder_get_ref(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003283 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303284 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003285 proc->pid, thread->pid,
3286 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3287 "BC_REQUEST_DEATH_NOTIFICATION" :
3288 "BC_CLEAR_DEATH_NOTIFICATION",
3289 target);
3290 break;
3291 }
3292
3293 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003294 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003295 proc->pid, thread->pid,
3296 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3297 "BC_REQUEST_DEATH_NOTIFICATION" :
3298 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003299 (u64)cookie, ref->data.debug_id,
3300 ref->data.desc, ref->data.strong,
3301 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003302
3303 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3304 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303305 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003306 proc->pid, thread->pid);
3307 break;
3308 }
3309 death = kzalloc(sizeof(*death), GFP_KERNEL);
3310 if (death == NULL) {
Todd Kjos858b8da2017-04-21 17:35:12 -07003311 WARN_ON(thread->return_error.cmd !=
3312 BR_OK);
3313 thread->return_error.cmd = BR_ERROR;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003314 binder_enqueue_work(
3315 thread->proc,
3316 &thread->return_error.work,
3317 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003318 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303319 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003320 proc->pid, thread->pid);
3321 break;
3322 }
3323 binder_stats_created(BINDER_STAT_DEATH);
3324 INIT_LIST_HEAD(&death->work.entry);
3325 death->cookie = cookie;
3326 ref->death = death;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003327 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003328 if (ref->node->proc == NULL) {
3329 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003330 if (thread->looper &
3331 (BINDER_LOOPER_STATE_REGISTERED |
3332 BINDER_LOOPER_STATE_ENTERED))
3333 binder_enqueue_work(
3334 proc,
3335 &ref->death->work,
3336 &thread->todo);
3337 else {
3338 binder_enqueue_work(
3339 proc,
3340 &ref->death->work,
3341 &proc->todo);
3342 wake_up_interruptible(
3343 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003344 }
3345 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003346 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003347 } else {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003348 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003349 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303350 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003351 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003352 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003353 break;
3354 }
3355 death = ref->death;
3356 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003357 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003358 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003359 (u64)death->cookie,
3360 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003361 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003362 break;
3363 }
3364 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003365 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003366 if (list_empty(&death->work.entry)) {
3367 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003368 if (thread->looper &
3369 (BINDER_LOOPER_STATE_REGISTERED |
3370 BINDER_LOOPER_STATE_ENTERED))
3371 binder_enqueue_work_ilocked(
3372 &death->work,
3373 &thread->todo);
3374 else {
3375 binder_enqueue_work_ilocked(
3376 &death->work,
3377 &proc->todo);
3378 wake_up_interruptible(
3379 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003380 }
3381 } else {
3382 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3383 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3384 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003385 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003386 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003387 }
3388 } break;
3389 case BC_DEAD_BINDER_DONE: {
3390 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003391 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003392 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003393
Arve Hjønnevågda498892014-02-21 14:40:26 -08003394 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003395 return -EFAULT;
3396
Lisa Du7a64cd82016-02-17 09:32:52 +08003397 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003398 binder_inner_proc_lock(proc);
3399 list_for_each_entry(w, &proc->delivered_death,
3400 entry) {
3401 struct binder_ref_death *tmp_death =
3402 container_of(w,
3403 struct binder_ref_death,
3404 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003405
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003406 if (tmp_death->cookie == cookie) {
3407 death = tmp_death;
3408 break;
3409 }
3410 }
3411 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003412 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3413 proc->pid, thread->pid, (u64)cookie,
3414 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003415 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003416 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3417 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003418 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003419 break;
3420 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003421 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003422 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3423 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003424 if (thread->looper &
3425 (BINDER_LOOPER_STATE_REGISTERED |
3426 BINDER_LOOPER_STATE_ENTERED))
3427 binder_enqueue_work_ilocked(
3428 &death->work, &thread->todo);
3429 else {
3430 binder_enqueue_work_ilocked(
3431 &death->work,
3432 &proc->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003433 wake_up_interruptible(&proc->wait);
3434 }
3435 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003436 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003437 } break;
3438
3439 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303440 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003441 proc->pid, thread->pid, cmd);
3442 return -EINVAL;
3443 }
3444 *consumed = ptr - buffer;
3445 }
3446 return 0;
3447}
3448
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003449static void binder_stat_br(struct binder_proc *proc,
3450 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003451{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003452 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003453 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003454 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3455 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3456 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003457 }
3458}
3459
3460static int binder_has_proc_work(struct binder_proc *proc,
3461 struct binder_thread *thread)
3462{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003463 return !binder_worklist_empty(proc, &proc->todo) ||
3464 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003465}
3466
3467static int binder_has_thread_work(struct binder_thread *thread)
3468{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003469 return !binder_worklist_empty(thread->proc, &thread->todo) ||
3470 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003471}
3472
Todd Kjos60792612017-05-24 10:51:01 -07003473static int binder_put_node_cmd(struct binder_proc *proc,
3474 struct binder_thread *thread,
3475 void __user **ptrp,
3476 binder_uintptr_t node_ptr,
3477 binder_uintptr_t node_cookie,
3478 int node_debug_id,
3479 uint32_t cmd, const char *cmd_name)
3480{
3481 void __user *ptr = *ptrp;
3482
3483 if (put_user(cmd, (uint32_t __user *)ptr))
3484 return -EFAULT;
3485 ptr += sizeof(uint32_t);
3486
3487 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3488 return -EFAULT;
3489 ptr += sizeof(binder_uintptr_t);
3490
3491 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3492 return -EFAULT;
3493 ptr += sizeof(binder_uintptr_t);
3494
3495 binder_stat_br(proc, thread, cmd);
3496 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3497 proc->pid, thread->pid, cmd_name, node_debug_id,
3498 (u64)node_ptr, (u64)node_cookie);
3499
3500 *ptrp = ptr;
3501 return 0;
3502}
3503
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003504static int binder_thread_read(struct binder_proc *proc,
3505 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003506 binder_uintptr_t binder_buffer, size_t size,
3507 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003508{
Arve Hjønnevågda498892014-02-21 14:40:26 -08003509 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003510 void __user *ptr = buffer + *consumed;
3511 void __user *end = buffer + size;
3512
3513 int ret = 0;
3514 int wait_for_proc_work;
3515
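/*
 * The first read into a fresh buffer is prefixed with BR_NOOP, so
 * userspace always sees at least one well-formed command and can
 * parse the returned stream uniformly.
 */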
3516 if (*consumed == 0) {
3517 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3518 return -EFAULT;
3519 ptr += sizeof(uint32_t);
3520 }
3521
3522retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07003523 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003524 wait_for_proc_work = thread->transaction_stack == NULL &&
Martijn Coenen995a36e2017-06-02 13:36:52 -07003525 binder_worklist_empty_ilocked(&thread->todo);
3526 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003527
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003528 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3529 if (wait_for_proc_work)
3530 proc->ready_threads++;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003531
3532 binder_unlock(__func__);
3533
3534 trace_binder_wait_for_work(wait_for_proc_work,
3535 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003536 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003537 if (wait_for_proc_work) {
3538 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3539 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303540 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003541 proc->pid, thread->pid, thread->looper);
3542 wait_event_interruptible(binder_user_error_wait,
3543 binder_stop_on_user_error < 2);
3544 }
3545 binder_set_nice(proc->default_priority);
3546 if (non_block) {
3547 if (!binder_has_proc_work(proc, thread))
3548 ret = -EAGAIN;
3549 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003550 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003551 } else {
3552 if (non_block) {
3553 if (!binder_has_thread_work(thread))
3554 ret = -EAGAIN;
3555 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003556 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003557 }
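/*
 * Idle looper threads wait exclusively on proc->wait so that one
 * incoming work item wakes one thread; other threads wait on their
 * own thread->wait. Both waits are freezable and therefore do not
 * block system suspend.
 */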
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003558
3559 binder_lock(__func__);
3560
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003561 if (wait_for_proc_work)
3562 proc->ready_threads--;
3563 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3564
3565 if (ret)
3566 return ret;
3567
3568 while (1) {
3569 uint32_t cmd;
3570 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003571 struct binder_work *w = NULL;
3572 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003573 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07003574 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003575
Todd Kjose7f23ed2017-03-21 13:06:01 -07003576 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003577 if (!binder_worklist_empty_ilocked(&thread->todo))
3578 list = &thread->todo;
3579 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3580 wait_for_proc_work)
3581 list = &proc->todo;
3582 else {
3583 binder_inner_proc_unlock(proc);
3584
Dmitry Voytik395262a2014-09-08 18:16:34 +04003585 /* no data added */
Todd Kjos6798e6d2017-01-06 14:19:25 -08003586 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003587 goto retry;
3588 break;
3589 }
3590
Todd Kjose7f23ed2017-03-21 13:06:01 -07003591 if (end - ptr < sizeof(tr) + 4) {
3592 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003593 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003594 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003595 w = binder_dequeue_work_head_ilocked(list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003596
3597 switch (w->type) {
3598 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003599 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003600 t = container_of(w, struct binder_transaction, work);
3601 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003602 case BINDER_WORK_RETURN_ERROR: {
3603 struct binder_error *e = container_of(
3604 w, struct binder_error, work);
3605
3606 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003607 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07003608 if (put_user(e->cmd, (uint32_t __user *)ptr))
3609 return -EFAULT;
3610 cmd = e->cmd;
3611 e->cmd = BR_OK;
3612 ptr += sizeof(uint32_t);
3613 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07003614 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003615 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003616 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003617 cmd = BR_TRANSACTION_COMPLETE;
3618 if (put_user(cmd, (uint32_t __user *)ptr))
3619 return -EFAULT;
3620 ptr += sizeof(uint32_t);
3621
3622 binder_stat_br(proc, thread, cmd);
3623 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303624 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003625 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003626 kfree(w);
3627 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3628 } break;
3629 case BINDER_WORK_NODE: {
3630 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07003631 int strong, weak;
3632 binder_uintptr_t node_ptr = node->ptr;
3633 binder_uintptr_t node_cookie = node->cookie;
3634 int node_debug_id = node->debug_id;
3635 int has_weak_ref;
3636 int has_strong_ref;
3637 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09003638
Todd Kjos60792612017-05-24 10:51:01 -07003639 BUG_ON(proc != node->proc);
3640 strong = node->internal_strong_refs ||
3641 node->local_strong_refs;
3642 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07003643 node->local_weak_refs ||
3644 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07003645 has_strong_ref = node->has_strong_ref;
3646 has_weak_ref = node->has_weak_ref;
3647
3648 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003649 node->has_weak_ref = 1;
3650 node->pending_weak_ref = 1;
3651 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003652 }
3653 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003654 node->has_strong_ref = 1;
3655 node->pending_strong_ref = 1;
3656 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003657 }
3658 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003659 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003660 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003661 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003662 if (!weak && !strong) {
3663 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3664 "%d:%d node %d u%016llx c%016llx deleted\n",
3665 proc->pid, thread->pid,
3666 node_debug_id,
3667 (u64)node_ptr,
3668 (u64)node_cookie);
3669 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003670 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003671 binder_node_lock(node);
3672 /*
3673 * Acquire the node lock before freeing the
3674 * node to serialize with other threads that
3675 * may have been holding the node lock while
3676 * decrementing this node (avoids race where
3677 * this thread frees while the other thread
3678 * is unlocking the node after the final
3679 * decrement)
3680 */
3681 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003682 binder_free_node(node);
3683 } else
3684 binder_inner_proc_unlock(proc);
3685
Todd Kjos60792612017-05-24 10:51:01 -07003686 if (weak && !has_weak_ref)
3687 ret = binder_put_node_cmd(
3688 proc, thread, &ptr, node_ptr,
3689 node_cookie, node_debug_id,
3690 BR_INCREFS, "BR_INCREFS");
3691 if (!ret && strong && !has_strong_ref)
3692 ret = binder_put_node_cmd(
3693 proc, thread, &ptr, node_ptr,
3694 node_cookie, node_debug_id,
3695 BR_ACQUIRE, "BR_ACQUIRE");
3696 if (!ret && !strong && has_strong_ref)
3697 ret = binder_put_node_cmd(
3698 proc, thread, &ptr, node_ptr,
3699 node_cookie, node_debug_id,
3700 BR_RELEASE, "BR_RELEASE");
3701 if (!ret && !weak && has_weak_ref)
3702 ret = binder_put_node_cmd(
3703 proc, thread, &ptr, node_ptr,
3704 node_cookie, node_debug_id,
3705 BR_DECREFS, "BR_DECREFS");
3706 if (orig_ptr == ptr)
3707 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3708 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3709 proc->pid, thread->pid,
3710 node_debug_id,
3711 (u64)node_ptr,
3712 (u64)node_cookie);
3713 if (ret)
3714 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003715 } break;
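/*
 * The node work above collapses the node's current reference state
 * into at most two of BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS,
 * emitted from values snapshotted under the inner lock because the
 * node itself may already have been freed.
 */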
3716 case BINDER_WORK_DEAD_BINDER:
3717 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3718 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3719 struct binder_ref_death *death;
3720 uint32_t cmd;
3721
3722 death = container_of(w, struct binder_ref_death, work);
3723 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3724 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3725 else
3726 cmd = BR_DEAD_BINDER;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003727 /*
3728 * TODO: there is a race condition between
3729 * death notification requests and delivery
3730 * of the notifications. This will be handled
3731 * in a later patch.
3732 */
3733 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003734 if (put_user(cmd, (uint32_t __user *)ptr))
3735 return -EFAULT;
3736 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003737 if (put_user(death->cookie,
3738 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003739 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003740 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07003741 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003742 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003743 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003744 proc->pid, thread->pid,
3745 cmd == BR_DEAD_BINDER ?
3746 "BR_DEAD_BINDER" :
3747 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003748 (u64)death->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003749
3750 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003751 kfree(death);
3752 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003753 } else {
3754 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003755 binder_enqueue_work_ilocked(
3756 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003757 binder_inner_proc_unlock(proc);
3758 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003759 if (cmd == BR_DEAD_BINDER)
3760 goto done; /* DEAD_BINDER notifications can cause transactions */
3761 } break;
3762 }
3763
3764 if (!t)
3765 continue;
3766
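/*
 * Only BINDER_WORK_TRANSACTION leaves t set; the code below converts
 * the in-kernel binder_transaction into the binder_transaction_data
 * that userspace consumes.
 */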
3767 BUG_ON(t->buffer == NULL);
3768 if (t->buffer->target_node) {
3769 struct binder_node *target_node = t->buffer->target_node;
Seunghun Lee10f62862014-05-01 01:30:23 +09003770
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003771 tr.target.ptr = target_node->ptr;
3772 tr.cookie = target_node->cookie;
3773 t->saved_priority = task_nice(current);
3774 if (t->priority < target_node->min_priority &&
3775 !(t->flags & TF_ONE_WAY))
3776 binder_set_nice(t->priority);
3777 else if (!(t->flags & TF_ONE_WAY) ||
3778 t->saved_priority > target_node->min_priority)
3779 binder_set_nice(target_node->min_priority);
3780 cmd = BR_TRANSACTION;
3781 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003782 tr.target.ptr = 0;
3783 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003784 cmd = BR_REPLY;
3785 }
3786 tr.code = t->code;
3787 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06003788 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003789
Todd Kjos2f993e22017-05-12 14:42:55 -07003790 t_from = binder_get_txn_from(t);
3791 if (t_from) {
3792 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09003793
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003794 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08003795 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003796 } else {
3797 tr.sender_pid = 0;
3798 }
3799
3800 tr.data_size = t->buffer->data_size;
3801 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07003802 tr.data.ptr.buffer = (binder_uintptr_t)
3803 ((uintptr_t)t->buffer->data +
3804 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003805 tr.data.ptr.offsets = tr.data.ptr.buffer +
3806 ALIGN(t->buffer->data_size,
3807 sizeof(void *));
3808
Todd Kjos2f993e22017-05-12 14:42:55 -07003809 if (put_user(cmd, (uint32_t __user *)ptr)) {
3810 if (t_from)
3811 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003812 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07003813 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003814 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07003815 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3816 if (t_from)
3817 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003818 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07003819 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003820 ptr += sizeof(tr);
3821
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003822 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003823 binder_stat_br(proc, thread, cmd);
3824 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003825 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003826 proc->pid, thread->pid,
3827 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3828 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07003829 t->debug_id, t_from ? t_from->proc->pid : 0,
3830 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003831 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003832 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003833
Todd Kjos2f993e22017-05-12 14:42:55 -07003834 if (t_from)
3835 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003836 t->buffer->allow_user_free = 1;
3837 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003838 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003839 t->to_parent = thread->transaction_stack;
3840 t->to_thread = thread;
3841 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003842 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003843 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07003844 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003845 }
3846 break;
3847 }
3848
3849done:
3850
3851 *consumed = ptr - buffer;
3852 if (proc->requested_threads + proc->ready_threads == 0 &&
3853 proc->requested_threads_started < proc->max_threads &&
3854 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3855 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
 3856 /* spawn a new thread if we leave this out */) {
3857 proc->requested_threads++;
3858 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303859 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003860 proc->pid, thread->pid);
3861 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3862 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07003863 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003864 }
3865 return 0;
3866}
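/*
 * Userspace consumes the stream written above through the read half
 * of BINDER_WRITE_READ. A minimal sketch of a looper thread, assuming
 * only the uapi definitions from <linux/android/binder.h>
 * (spawn_looper_thread() is a hypothetical helper; real loopers such
 * as libbinder's IPCThreadState decode every BR_* code explicitly):
 *
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	char rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&enter,
 *		.write_size = sizeof(enter),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return;
 *	for (binder_size_t off = 0; off < bwr.read_consumed;) {
 *		uint32_t cmd = *(uint32_t *)(rbuf + off);
 *
 *		off += sizeof(uint32_t) + _IOC_SIZE(cmd);
 *		if (cmd == BR_SPAWN_LOOPER)
 *			spawn_looper_thread();
 *	}
 *
 * Every BR_* code encodes its payload length in the _IOC size bits,
 * which is what makes the generic _IOC_SIZE() skip possible; the
 * spawned thread is expected to issue BC_REGISTER_LOOPER before it
 * starts reading.
 */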
3867
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003868static void binder_release_work(struct binder_proc *proc,
3869 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003870{
3871 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09003872
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003873 while (1) {
3874 w = binder_dequeue_work_head(proc, list);
3875 if (!w)
3876 return;
3877
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003878 switch (w->type) {
3879 case BINDER_WORK_TRANSACTION: {
3880 struct binder_transaction *t;
3881
3882 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003883 if (t->buffer->target_node &&
3884 !(t->flags & TF_ONE_WAY)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003885 binder_send_failed_reply(t, BR_DEAD_REPLY);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003886 } else {
3887 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303888 "undelivered transaction %d\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003889 t->debug_id);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003890 binder_free_transaction(t);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003891 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003892 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003893 case BINDER_WORK_RETURN_ERROR: {
3894 struct binder_error *e = container_of(
3895 w, struct binder_error, work);
3896
3897 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3898 "undelivered TRANSACTION_ERROR: %u\n",
3899 e->cmd);
3900 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003901 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003902 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303903 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003904 kfree(w);
3905 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3906 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003907 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3908 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3909 struct binder_ref_death *death;
3910
3911 death = container_of(w, struct binder_ref_death, work);
3912 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003913 "undelivered death notification, %016llx\n",
3914 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003915 kfree(death);
3916 binder_stats_deleted(BINDER_STAT_DEATH);
3917 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003918 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303919 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003920 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003921 break;
3922 }
3923 }
3924
3925}
3926
Todd Kjosb4827902017-05-25 15:52:17 -07003927static struct binder_thread *binder_get_thread_ilocked(
3928 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003929{
3930 struct binder_thread *thread = NULL;
3931 struct rb_node *parent = NULL;
3932 struct rb_node **p = &proc->threads.rb_node;
3933
3934 while (*p) {
3935 parent = *p;
3936 thread = rb_entry(parent, struct binder_thread, rb_node);
3937
3938 if (current->pid < thread->pid)
3939 p = &(*p)->rb_left;
3940 else if (current->pid > thread->pid)
3941 p = &(*p)->rb_right;
3942 else
Todd Kjosb4827902017-05-25 15:52:17 -07003943 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003944 }
Todd Kjosb4827902017-05-25 15:52:17 -07003945 if (!new_thread)
3946 return NULL;
3947 thread = new_thread;
3948 binder_stats_created(BINDER_STAT_THREAD);
3949 thread->proc = proc;
3950 thread->pid = current->pid;
3951 atomic_set(&thread->tmp_ref, 0);
3952 init_waitqueue_head(&thread->wait);
3953 INIT_LIST_HEAD(&thread->todo);
3954 rb_link_node(&thread->rb_node, parent, p);
3955 rb_insert_color(&thread->rb_node, &proc->threads);
3956 thread->looper_need_return = true;
3957 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
3958 thread->return_error.cmd = BR_OK;
3959 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3960 thread->reply_error.cmd = BR_OK;
3961
3962 return thread;
3963}
3964
3965static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3966{
3967 struct binder_thread *thread;
3968 struct binder_thread *new_thread;
3969
3970 binder_inner_proc_lock(proc);
3971 thread = binder_get_thread_ilocked(proc, NULL);
3972 binder_inner_proc_unlock(proc);
3973 if (!thread) {
3974 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3975 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003976 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07003977 binder_inner_proc_lock(proc);
3978 thread = binder_get_thread_ilocked(proc, new_thread);
3979 binder_inner_proc_unlock(proc);
3980 if (thread != new_thread)
3981 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003982 }
3983 return thread;
3984}
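/*
 * The two-step dance above exists because kzalloc(GFP_KERNEL) may
 * sleep and therefore cannot run under proc->inner_lock, which is a
 * spinlock. The lookup is first attempted with no allocation; on a
 * miss the lock is dropped, the thread struct is allocated, and the
 * lookup is repeated with the preallocated struct. If another thread
 * raced in and inserted the same pid in the meantime,
 * binder_get_thread_ilocked() returns the existing entry and the
 * unused allocation is freed.
 */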
3985
Todd Kjos2f993e22017-05-12 14:42:55 -07003986static void binder_free_proc(struct binder_proc *proc)
3987{
3988 BUG_ON(!list_empty(&proc->todo));
3989 BUG_ON(!list_empty(&proc->delivered_death));
3990 binder_alloc_deferred_release(&proc->alloc);
3991 put_task_struct(proc->tsk);
3992 binder_stats_deleted(BINDER_STAT_PROC);
3993 kfree(proc);
3994}
3995
3996static void binder_free_thread(struct binder_thread *thread)
3997{
3998 BUG_ON(!list_empty(&thread->todo));
3999 binder_stats_deleted(BINDER_STAT_THREAD);
4000 binder_proc_dec_tmpref(thread->proc);
4001 kfree(thread);
4002}
4003
4004static int binder_thread_release(struct binder_proc *proc,
4005 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004006{
4007 struct binder_transaction *t;
4008 struct binder_transaction *send_reply = NULL;
4009 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07004010 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004011
Todd Kjosb4827902017-05-25 15:52:17 -07004012 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004013 /*
4014 * take a ref on the proc so it survives
4015 * after we remove this thread from proc->threads.
 4016 * The corresponding decrement happens when we
 4017 * actually free the thread in binder_free_thread()
4018 */
4019 proc->tmp_ref++;
4020 /*
4021 * take a ref on this thread to ensure it
4022 * survives while we are releasing it
4023 */
4024 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004025 rb_erase(&thread->rb_node, &proc->threads);
4026 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07004027 if (t) {
4028 spin_lock(&t->lock);
4029 if (t->to_thread == thread)
4030 send_reply = t;
4031 }
4032 thread->is_dead = true;
4033
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004034 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07004035 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004036 active_transactions++;
4037 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304038 "release %d:%d transaction %d %s, still active\n",
4039 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004040 t->debug_id,
4041 (t->to_thread == thread) ? "in" : "out");
4042
4043 if (t->to_thread == thread) {
4044 t->to_proc = NULL;
4045 t->to_thread = NULL;
4046 if (t->buffer) {
4047 t->buffer->transaction = NULL;
4048 t->buffer = NULL;
4049 }
4050 t = t->to_parent;
4051 } else if (t->from == thread) {
4052 t->from = NULL;
4053 t = t->from_parent;
4054 } else
4055 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07004056 spin_unlock(&last_t->lock);
4057 if (t)
4058 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004059 }
Todd Kjosb4827902017-05-25 15:52:17 -07004060 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004061
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004062 if (send_reply)
4063 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004064 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004065 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004066 return active_transactions;
4067}
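/*
 * The stack walk above touches each transaction only under its own
 * t->lock: the next link (to_parent or from_parent) is read while the
 * current lock is held, then the current lock (remembered in last_t)
 * is dropped and the next transaction's lock is taken before it is
 * modified. thread->is_dead is set before the walk so concurrent
 * senders observe the thread as dead.
 */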
4068
4069static unsigned int binder_poll(struct file *filp,
4070 struct poll_table_struct *wait)
4071{
4072 struct binder_proc *proc = filp->private_data;
4073 struct binder_thread *thread = NULL;
4074 int wait_for_proc_work;
4075
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004076 binder_lock(__func__);
4077
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004078 thread = binder_get_thread(proc);
 if (thread == NULL) {
 binder_unlock(__func__);
 return POLLERR;
 }
 4079
Martijn Coenen995a36e2017-06-02 13:36:52 -07004080 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004081 wait_for_proc_work = thread->transaction_stack == NULL &&
Martijn Coenen995a36e2017-06-02 13:36:52 -07004082 binder_worklist_empty_ilocked(&thread->todo);
4083 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004084
4085 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004086
4087 if (wait_for_proc_work) {
4088 if (binder_has_proc_work(proc, thread))
4089 return POLLIN;
4090 poll_wait(filp, &proc->wait, wait);
4091 if (binder_has_proc_work(proc, thread))
4092 return POLLIN;
4093 } else {
4094 if (binder_has_thread_work(thread))
4095 return POLLIN;
4096 poll_wait(filp, &thread->wait, wait);
4097 if (binder_has_thread_work(thread))
4098 return POLLIN;
4099 }
4100 return 0;
4101}
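/*
 * POLLIN from binder_poll() means a BINDER_WRITE_READ with a nonzero
 * read_size would find work without blocking. A sketch of an event
 * loop multiplexing the binder fd (drain_binder_commands() is a
 * hypothetical helper):
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_binder_commands(binder_fd);
 */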
4102
Tair Rzayev78260ac2014-06-03 22:27:21 +03004103static int binder_ioctl_write_read(struct file *filp,
4104 unsigned int cmd, unsigned long arg,
4105 struct binder_thread *thread)
4106{
4107 int ret = 0;
4108 struct binder_proc *proc = filp->private_data;
4109 unsigned int size = _IOC_SIZE(cmd);
4110 void __user *ubuf = (void __user *)arg;
4111 struct binder_write_read bwr;
4112
4113 if (size != sizeof(struct binder_write_read)) {
4114 ret = -EINVAL;
4115 goto out;
4116 }
4117 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4118 ret = -EFAULT;
4119 goto out;
4120 }
4121 binder_debug(BINDER_DEBUG_READ_WRITE,
4122 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4123 proc->pid, thread->pid,
4124 (u64)bwr.write_size, (u64)bwr.write_buffer,
4125 (u64)bwr.read_size, (u64)bwr.read_buffer);
4126
4127 if (bwr.write_size > 0) {
4128 ret = binder_thread_write(proc, thread,
4129 bwr.write_buffer,
4130 bwr.write_size,
4131 &bwr.write_consumed);
4132 trace_binder_write_done(ret);
4133 if (ret < 0) {
4134 bwr.read_consumed = 0;
4135 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4136 ret = -EFAULT;
4137 goto out;
4138 }
4139 }
4140 if (bwr.read_size > 0) {
4141 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4142 bwr.read_size,
4143 &bwr.read_consumed,
4144 filp->f_flags & O_NONBLOCK);
4145 trace_binder_read_done(ret);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004146 if (!binder_worklist_empty(proc, &proc->todo))
Tair Rzayev78260ac2014-06-03 22:27:21 +03004147 wake_up_interruptible(&proc->wait);
4148 if (ret < 0) {
4149 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4150 ret = -EFAULT;
4151 goto out;
4152 }
4153 }
4154 binder_debug(BINDER_DEBUG_READ_WRITE,
4155 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4156 proc->pid, thread->pid,
4157 (u64)bwr.write_consumed, (u64)bwr.write_size,
4158 (u64)bwr.read_consumed, (u64)bwr.read_size);
4159 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4160 ret = -EFAULT;
4161 goto out;
4162 }
4163out:
4164 return ret;
4165}
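/*
 * struct binder_write_read carries state in both directions: the
 * caller fills in the write/read buffers and sizes, and the driver
 * reports progress back through write_consumed and read_consumed.
 * A write-only sketch that hands a received transaction buffer back
 * to the driver (tr is a struct binder_transaction_data read earlier;
 * the command stream is packed, each 32-bit BC_* code immediately
 * followed by its payload):
 *
 *	struct __attribute__((packed)) {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} wcmd = { BC_FREE_BUFFER, tr.data.ptr.buffer };
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&wcmd,
 *		.write_size = sizeof(wcmd),
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */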
4166
4167static int binder_ioctl_set_ctx_mgr(struct file *filp)
4168{
4169 int ret = 0;
4170 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004171 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004172 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004173 kuid_t curr_euid = current_euid();
4174
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004175 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004176 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004177 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4178 ret = -EBUSY;
4179 goto out;
4180 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004181 ret = security_binder_set_context_mgr(proc->tsk);
4182 if (ret < 0)
4183 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004184 if (uid_valid(context->binder_context_mgr_uid)) {
4185 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004186 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4187 from_kuid(&init_user_ns, curr_euid),
4188 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004189 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004190 ret = -EPERM;
4191 goto out;
4192 }
4193 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004194 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004195 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004196 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004197 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004198 ret = -ENOMEM;
4199 goto out;
4200 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004201 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004202 new_node->local_weak_refs++;
4203 new_node->local_strong_refs++;
4204 new_node->has_strong_ref = 1;
4205 new_node->has_weak_ref = 1;
4206 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004207 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004208 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004209out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004210 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004211 return ret;
4212}
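/*
 * Only one process per context may become the context manager; on
 * Android this is servicemanager, which claims the role right after
 * opening the device. A sketch, assuming the standard uapi header
 * (fatal() is a placeholder; the ioctl argument is ignored here):
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		fatal("cannot become context manager");
 */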
4213
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004214static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4215{
4216 int ret;
4217 struct binder_proc *proc = filp->private_data;
4218 struct binder_thread *thread;
4219 unsigned int size = _IOC_SIZE(cmd);
4220 void __user *ubuf = (void __user *)arg;
4221
Tair Rzayev78260ac2014-06-03 22:27:21 +03004222 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4223 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004224
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004225 trace_binder_ioctl(cmd, arg);
4226
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004227 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4228 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004229 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004230
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004231 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004232 thread = binder_get_thread(proc);
4233 if (thread == NULL) {
4234 ret = -ENOMEM;
4235 goto err;
4236 }
4237
4238 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004239 case BINDER_WRITE_READ:
4240 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4241 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004242 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004243 break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004244 case BINDER_SET_MAX_THREADS:
4245 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
4246 ret = -EINVAL;
4247 goto err;
4248 }
4249 break;
4250 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004251 ret = binder_ioctl_set_ctx_mgr(filp);
4252 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004253 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004254 break;
4255 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304256 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004257 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004258 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004259 thread = NULL;
4260 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004261 case BINDER_VERSION: {
4262 struct binder_version __user *ver = ubuf;
4263
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004264 if (size != sizeof(struct binder_version)) {
4265 ret = -EINVAL;
4266 goto err;
4267 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004268 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4269 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004270 ret = -EINVAL;
4271 goto err;
4272 }
4273 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004274 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004275 default:
4276 ret = -EINVAL;
4277 goto err;
4278 }
4279 ret = 0;
4280err:
4281 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004282 thread->looper_need_return = false;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004283 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004284 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4285 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304286 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004287err_unlocked:
4288 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004289 return ret;
4290}
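/*
 * The typical ioctl sequence a process performs once after opening
 * the device, sketched with the uapi definitions (error handling
 * omitted):
 *
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	ioctl(fd, BINDER_VERSION, &vers);
 *	if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		abort();
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */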
4291
4292static void binder_vma_open(struct vm_area_struct *vma)
4293{
4294 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004295
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004296 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304297 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004298 proc->pid, vma->vm_start, vma->vm_end,
4299 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4300 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004301}
4302
4303static void binder_vma_close(struct vm_area_struct *vma)
4304{
4305 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004306
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004307 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304308 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004309 proc->pid, vma->vm_start, vma->vm_end,
4310 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4311 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004312 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004313 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4314}
4315
Vinayak Menonddac7d52014-06-02 18:17:59 +05304316static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4317{
4318 return VM_FAULT_SIGBUS;
4319}
4320
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004321static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004322 .open = binder_vma_open,
4323 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304324 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004325};
4326
Todd Kjosd325d372016-10-10 10:40:53 -07004327static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4328{
4329 int ret;
4330 struct binder_proc *proc = filp->private_data;
4331 const char *failure_string;
4332
4333 if (proc->tsk != current->group_leader)
4334 return -EINVAL;
4335
4336 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4337 vma->vm_end = vma->vm_start + SZ_4M;
4338
4339 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4340 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4341 __func__, proc->pid, vma->vm_start, vma->vm_end,
4342 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4343 (unsigned long)pgprot_val(vma->vm_page_prot));
4344
4345 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4346 ret = -EPERM;
4347 failure_string = "bad vm_flags";
4348 goto err_bad_arg;
4349 }
4350 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4351 vma->vm_ops = &binder_vm_ops;
4352 vma->vm_private_data = proc;
4353
4354 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4355 if (ret)
4356 return ret;
4357 proc->files = get_files_struct(current);
4358 return 0;
4359
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004360err_bad_arg:
Sherwin Soltani258767f2012-06-26 02:00:30 -04004361 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004362 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4363 return ret;
4364}
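/*
 * The mapping is read-only for userspace by design: FORBIDDEN_MMAP_FLAGS
 * rejects writable mappings and VM_MAYWRITE is cleared above, so only
 * the driver ever copies transaction data into the buffer. A sketch of
 * the userspace setup, where BINDER_VM_SIZE is whatever buffer size
 * the process picks (the driver clamps it to 4 MB):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
 *			 MAP_PRIVATE, fd, 0);
 */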
4365
4366static int binder_open(struct inode *nodp, struct file *filp)
4367{
4368 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004369 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004370
4371 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4372 current->group_leader->pid, current->pid);
4373
4374 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4375 if (proc == NULL)
4376 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004377 spin_lock_init(&proc->inner_lock);
4378 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004379 get_task_struct(current->group_leader);
4380 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004381 INIT_LIST_HEAD(&proc->todo);
4382 init_waitqueue_head(&proc->wait);
4383 proc->default_priority = task_nice(current);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004384 binder_dev = container_of(filp->private_data, struct binder_device,
4385 miscdev);
4386 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07004387 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004388
4389 binder_lock(__func__);
4390
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004391 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004392 proc->pid = current->group_leader->pid;
4393 INIT_LIST_HEAD(&proc->delivered_death);
4394 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004395
4396 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004397
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004398 mutex_lock(&binder_procs_lock);
4399 hlist_add_head(&proc->proc_node, &binder_procs);
4400 mutex_unlock(&binder_procs_lock);
4401
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004402 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004403 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09004404
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004405 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004406 /*
4407 * proc debug entries are shared between contexts, so
4408 * this will fail if the process tries to open the driver
 4409 * again with a different context. The printing code will
 4410 * print all contexts that a given PID has anyway, so this
4411 * is not a problem.
4412 */
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004413 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004414 binder_debugfs_dir_entry_proc,
4415 (void *)(unsigned long)proc->pid,
4416 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004417 }
4418
4419 return 0;
4420}
4421
4422static int binder_flush(struct file *filp, fl_owner_t id)
4423{
4424 struct binder_proc *proc = filp->private_data;
4425
4426 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4427
4428 return 0;
4429}
4430
4431static void binder_deferred_flush(struct binder_proc *proc)
4432{
4433 struct rb_node *n;
4434 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09004435
Todd Kjosb4827902017-05-25 15:52:17 -07004436 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004437 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4438 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09004439
Todd Kjos6798e6d2017-01-06 14:19:25 -08004440 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004441 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4442 wake_up_interruptible(&thread->wait);
4443 wake_count++;
4444 }
4445 }
Todd Kjosb4827902017-05-25 15:52:17 -07004446 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004447 wake_up_interruptible_all(&proc->wait);
4448
4449 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4450 "binder_flush: %d woke %d threads\n", proc->pid,
4451 wake_count);
4452}
4453
4454static int binder_release(struct inode *nodp, struct file *filp)
4455{
4456 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004457
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004458 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004459 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4460
4461 return 0;
4462}
4463
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004464static int binder_node_release(struct binder_node *node, int refs)
4465{
4466 struct binder_ref *ref;
4467 int death = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004468 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004469
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004470 binder_release_work(proc, &node->async_todo);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004471
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004472 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004473 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004474 binder_dequeue_work_ilocked(&node->work);
Todd Kjosf22abc72017-05-09 11:08:05 -07004475 /*
 4476 * The caller must have taken a temporary ref on the node.
4477 */
4478 BUG_ON(!node->tmp_refs);
4479 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004480 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004481 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004482 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004483
4484 return refs;
4485 }
4486
4487 node->proc = NULL;
4488 node->local_strong_refs = 0;
4489 node->local_weak_refs = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004490 binder_inner_proc_unlock(proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004491
4492 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004493 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004494 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004495
4496 hlist_for_each_entry(ref, &node->refs, node_entry) {
4497 refs++;
4498
4499 if (!ref->death)
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08004500 continue;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004501
4502 death++;
4503
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004504 binder_inner_proc_lock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004505 if (list_empty(&ref->death->work.entry)) {
4506 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004507 binder_enqueue_work_ilocked(&ref->death->work,
4508 &ref->proc->todo);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004509 wake_up_interruptible(&ref->proc->wait);
4510 } else
4511 BUG();
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004512 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004513 }
4514
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004515 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4516 "node %d now dead, refs %d, death %d\n",
4517 node->debug_id, refs, death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004518 binder_node_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004519 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004520
4521 return refs;
4522}
4523
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004524static void binder_deferred_release(struct binder_proc *proc)
4525{
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004526 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004527 struct rb_node *n;
Todd Kjosd325d372016-10-10 10:40:53 -07004528 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004529
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004530 BUG_ON(proc->files);
4531
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004532 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004533 hlist_del(&proc->proc_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004534 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004535
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004536 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004537 if (context->binder_context_mgr_node &&
4538 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004539 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004540 "%s: %d context_mgr_node gone\n",
4541 __func__, proc->pid);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004542 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004543 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004544 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjosb4827902017-05-25 15:52:17 -07004545 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004546 /*
4547 * Make sure proc stays alive after we
4548 * remove all the threads
4549 */
4550 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004551
Todd Kjos2f993e22017-05-12 14:42:55 -07004552 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004553 threads = 0;
4554 active_transactions = 0;
4555 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004556 struct binder_thread *thread;
4557
4558 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004559 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004560 threads++;
Todd Kjos2f993e22017-05-12 14:42:55 -07004561 active_transactions += binder_thread_release(proc, thread);
Todd Kjosb4827902017-05-25 15:52:17 -07004562 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004563 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004564
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004565 nodes = 0;
4566 incoming_refs = 0;
4567 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004568 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004569
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004570 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004571 nodes++;
Todd Kjosf22abc72017-05-09 11:08:05 -07004572 /*
4573 * take a temporary ref on the node before
4574 * calling binder_node_release() which will either
4575 * kfree() the node or call binder_put_node()
4576 */
Todd Kjos425d23f2017-06-12 12:07:26 -07004577 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004578 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjos425d23f2017-06-12 12:07:26 -07004579 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004580 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjos425d23f2017-06-12 12:07:26 -07004581 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004582 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004583 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004584
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004585 outgoing_refs = 0;
4586 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004587 struct binder_ref *ref;
4588
4589 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004590 outgoing_refs++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07004591 binder_cleanup_ref(ref);
4592 binder_free_ref(ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004593 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004594
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004595 binder_release_work(proc, &proc->todo);
4596 binder_release_work(proc, &proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004597
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004598 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjosd325d372016-10-10 10:40:53 -07004599 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004600 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjosd325d372016-10-10 10:40:53 -07004601 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004602
Todd Kjos2f993e22017-05-12 14:42:55 -07004603 binder_proc_dec_tmpref(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004604}
4605
4606static void binder_deferred_func(struct work_struct *work)
4607{
4608 struct binder_proc *proc;
4609 struct files_struct *files;
4610
4611 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09004612
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004613 do {
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004614 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004615 mutex_lock(&binder_deferred_lock);
4616 if (!hlist_empty(&binder_deferred_list)) {
4617 proc = hlist_entry(binder_deferred_list.first,
4618 struct binder_proc, deferred_work_node);
4619 hlist_del_init(&proc->deferred_work_node);
4620 defer = proc->deferred_work;
4621 proc->deferred_work = 0;
4622 } else {
4623 proc = NULL;
4624 defer = 0;
4625 }
4626 mutex_unlock(&binder_deferred_lock);
4627
4628 files = NULL;
4629 if (defer & BINDER_DEFERRED_PUT_FILES) {
4630 files = proc->files;
4631 if (files)
4632 proc->files = NULL;
4633 }
4634
4635 if (defer & BINDER_DEFERRED_FLUSH)
4636 binder_deferred_flush(proc);
4637
4638 if (defer & BINDER_DEFERRED_RELEASE)
4639 binder_deferred_release(proc); /* frees proc */
4640
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004641 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004642 if (files)
4643 put_files_struct(files);
4644 } while (proc);
4645}
4646static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4647
4648static void
4649binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4650{
4651 mutex_lock(&binder_deferred_lock);
4652 proc->deferred_work |= defer;
4653 if (hlist_unhashed(&proc->deferred_work_node)) {
4654 hlist_add_head(&proc->deferred_work_node,
4655 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05304656 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004657 }
4658 mutex_unlock(&binder_deferred_lock);
4659}
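/*
 * Deferred work requests coalesce: the defer bits are OR'ed into
 * proc->deferred_work and the proc is queued at most once, so e.g. a
 * flush followed by a release before the worker runs is handled in a
 * single binder_deferred_func() pass.
 */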
4660
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004661static void print_binder_transaction(struct seq_file *m, const char *prefix,
4662 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004663{
Todd Kjos2f993e22017-05-12 14:42:55 -07004664 spin_lock(&t->lock);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004665 seq_printf(m,
4666 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4667 prefix, t->debug_id, t,
4668 t->from ? t->from->proc->pid : 0,
4669 t->from ? t->from->pid : 0,
4670 t->to_proc ? t->to_proc->pid : 0,
4671 t->to_thread ? t->to_thread->pid : 0,
4672 t->code, t->flags, t->priority, t->need_reply);
Todd Kjos2f993e22017-05-12 14:42:55 -07004673 spin_unlock(&t->lock);
4674
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004675 if (t->buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004676 seq_puts(m, " buffer free\n");
4677 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004678 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004679 if (t->buffer->target_node)
4680 seq_printf(m, " node %d",
4681 t->buffer->target_node->debug_id);
4682 seq_printf(m, " size %zd:%zd data %p\n",
4683 t->buffer->data_size, t->buffer->offsets_size,
4684 t->buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004685}
4686
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004687static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
4688 const char *transaction_prefix,
4689 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004690{
4691 struct binder_node *node;
4692 struct binder_transaction *t;
4693
4694 switch (w->type) {
4695 case BINDER_WORK_TRANSACTION:
4696 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004697 print_binder_transaction(m, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004698 break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004699 case BINDER_WORK_RETURN_ERROR: {
4700 struct binder_error *e = container_of(
4701 w, struct binder_error, work);
4702
4703 seq_printf(m, "%stransaction error: %u\n",
4704 prefix, e->cmd);
4705 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004706 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004707 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004708 break;
4709 case BINDER_WORK_NODE:
4710 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08004711 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
4712 prefix, node->debug_id,
4713 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004714 break;
4715 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004716 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004717 break;
4718 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004719 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004720 break;
4721 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004722 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004723 break;
4724 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004725 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004726 break;
4727 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004728}
4729
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004730static void print_binder_thread_ilocked(struct seq_file *m,
4731 struct binder_thread *thread,
4732 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004733{
4734 struct binder_transaction *t;
4735 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004736 size_t start_pos = m->count;
4737 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004738
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004739 WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
Todd Kjos2f993e22017-05-12 14:42:55 -07004740 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
Todd Kjos6798e6d2017-01-06 14:19:25 -08004741 thread->pid, thread->looper,
Todd Kjos2f993e22017-05-12 14:42:55 -07004742 thread->looper_need_return,
4743 atomic_read(&thread->tmp_ref));
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004744 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004745 t = thread->transaction_stack;
4746 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004747 if (t->from == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004748 print_binder_transaction(m,
4749 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004750 t = t->from_parent;
4751 } else if (t->to_thread == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004752 print_binder_transaction(m,
4753 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004754 t = t->to_parent;
4755 } else {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004756 print_binder_transaction(m, " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004757 t = NULL;
4758 }
4759 }
4760 list_for_each_entry(w, &thread->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004761 print_binder_work_ilocked(m, " ",
4762 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004763 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004764 if (!print_always && m->count == header_pos)
4765 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004766}
4767
Todd Kjos425d23f2017-06-12 12:07:26 -07004768static void print_binder_node_nilocked(struct seq_file *m,
4769 struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004770{
4771 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004772 struct binder_work *w;
4773 int count;
4774
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004775 WARN_ON(!spin_is_locked(&node->lock));
Todd Kjos425d23f2017-06-12 12:07:26 -07004776 if (node->proc)
4777 WARN_ON(!spin_is_locked(&node->proc->inner_lock));
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004778
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004779 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08004780 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004781 count++;
4782
Todd Kjosf22abc72017-05-09 11:08:05 -07004783 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
Arve Hjønnevågda498892014-02-21 14:40:26 -08004784 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004785 node->has_strong_ref, node->has_weak_ref,
4786 node->local_strong_refs, node->local_weak_refs,
Todd Kjosf22abc72017-05-09 11:08:05 -07004787 node->internal_strong_refs, count, node->tmp_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004788 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004789 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08004790 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004791 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004792 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004793 seq_puts(m, "\n");
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004794 if (node->proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004795 list_for_each_entry(w, &node->async_todo, entry)
4796 print_binder_work_ilocked(m, " ",
4797 " pending async transaction", w);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004798 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004799}
4800
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004801static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004802{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004803 binder_node_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07004804 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4805 ref->data.debug_id, ref->data.desc,
4806 ref->node->proc ? "" : "dead ",
4807 ref->node->debug_id, ref->data.strong,
4808 ref->data.weak, ref->death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004809 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004810}
4811
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004812static void print_binder_proc(struct seq_file *m,
4813 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004814{
4815 struct binder_work *w;
4816 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004817 size_t start_pos = m->count;
4818 size_t header_pos;
Todd Kjos425d23f2017-06-12 12:07:26 -07004819 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004820
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004821 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004822 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004823 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004824
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004825 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004826 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004827 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004828 rb_node), print_all);
Todd Kjos425d23f2017-06-12 12:07:26 -07004829
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004830 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004831 struct binder_node *node = rb_entry(n, struct binder_node,
4832 rb_node);
Todd Kjos425d23f2017-06-12 12:07:26 -07004833 /*
4834 * take a temporary reference on the node so it
4835 * survives and isn't removed from the tree
4836 * while we print it.
4837 */
4838 binder_inc_node_tmpref_ilocked(node);
4839 /* Need to drop inner lock to take node lock */
4840 binder_inner_proc_unlock(proc);
4841 if (last_node)
4842 binder_put_node(last_node);
4843 binder_node_inner_lock(node);
4844 print_binder_node_nilocked(m, node);
4845 binder_node_inner_unlock(node);
4846 last_node = node;
4847 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004848 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004849 binder_inner_proc_unlock(proc);
4850 if (last_node)
4851 binder_put_node(last_node);
4852
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004853 if (print_all) {
4854 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004855 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004856 n = rb_next(n))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004857 print_binder_ref(m, rb_entry(n, struct binder_ref,
4858 rb_node_desc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004859 }
Todd Kjosd325d372016-10-10 10:40:53 -07004860 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004861 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004862 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004863 print_binder_work_ilocked(m, " ", " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004864 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004865 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004866 break;
4867 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004868 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004869 if (!print_all && m->count == header_pos)
4870 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004871}
4872
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004873static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004874 "BR_ERROR",
4875 "BR_OK",
4876 "BR_TRANSACTION",
4877 "BR_REPLY",
4878 "BR_ACQUIRE_RESULT",
4879 "BR_DEAD_REPLY",
4880 "BR_TRANSACTION_COMPLETE",
4881 "BR_INCREFS",
4882 "BR_ACQUIRE",
4883 "BR_RELEASE",
4884 "BR_DECREFS",
4885 "BR_ATTEMPT_ACQUIRE",
4886 "BR_NOOP",
4887 "BR_SPAWN_LOOPER",
4888 "BR_FINISHED",
4889 "BR_DEAD_BINDER",
4890 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4891 "BR_FAILED_REPLY"
4892};
4893
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004894static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004895 "BC_TRANSACTION",
4896 "BC_REPLY",
4897 "BC_ACQUIRE_RESULT",
4898 "BC_FREE_BUFFER",
4899 "BC_INCREFS",
4900 "BC_ACQUIRE",
4901 "BC_RELEASE",
4902 "BC_DECREFS",
4903 "BC_INCREFS_DONE",
4904 "BC_ACQUIRE_DONE",
4905 "BC_ATTEMPT_ACQUIRE",
4906 "BC_REGISTER_LOOPER",
4907 "BC_ENTER_LOOPER",
4908 "BC_EXIT_LOOPER",
4909 "BC_REQUEST_DEATH_NOTIFICATION",
4910 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02004911 "BC_DEAD_BINDER_DONE",
4912 "BC_TRANSACTION_SG",
4913 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004914};
4915
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004916static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004917 "proc",
4918 "thread",
4919 "node",
4920 "ref",
4921 "death",
4922 "transaction",
4923 "transaction_complete"
4924};
4925
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004926static void print_binder_stats(struct seq_file *m, const char *prefix,
4927 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004928{
4929 int i;
4930
4931 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004932 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004933 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004934 int temp = atomic_read(&stats->bc[i]);
4935
4936 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004937 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004938 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004939 }
4940
4941 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004942 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004943 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004944 int temp = atomic_read(&stats->br[i]);
4945
4946 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004947 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004948 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004949 }
4950
4951 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004952 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004953 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004954 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004955 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004956 int created = atomic_read(&stats->obj_created[i]);
4957 int deleted = atomic_read(&stats->obj_deleted[i]);
4958
4959 if (created || deleted)
4960 seq_printf(m, "%s%s: active %d total %d\n",
4961 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004962 binder_objstat_strings[i],
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004963 created - deleted,
4964 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004965 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004966}
4967
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

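/*
 * seq_file show handler behind the debugfs "state" file (typically
 * /sys/kernel/debug/binder/state). Dead nodes are walked under
 * binder_dead_nodes_lock; a temporary reference (tmp_refs) keeps the
 * current node alive while the spinlock is dropped for printing, and
 * the previous node is only released after the lock is dropped again,
 * since binder_put_node() may free it and takes other locks.
 */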
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

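/*
 * Show handler for the debugfs "stats" file: the global counters
 * first, then per-process statistics for every proc on binder_procs.
 */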
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

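/*
 * Show handler for the debugfs "transactions" file. Unlike "state",
 * print_binder_proc() is called with print_all == 0, restricting the
 * dump to active transaction state instead of the full per-process
 * node and ref tables.
 */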
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

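/*
 * Show handler for the per-pid entries under the debugfs "proc"
 * directory. The pid to dump arrives in m->private, set from the
 * inode private data when the per-pid file was created; every proc
 * with a matching pid (there can be one per binder context) is
 * printed in full.
 */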
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	binder_lock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}

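/*
 * Print one slot of the transaction log ring buffer. Writers publish
 * e->debug_id_done only after the entry is fully written, so the
 * READ_ONCE()/smp_rmb() pairs below detect a slot that was (or is
 * being) overwritten concurrently and tag it " (incomplete)" rather
 * than presenting torn values as valid.
 */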
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

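/*
 * Show handler for the "transaction_log" and "failed_transaction_log"
 * debugfs files. The cursor only ever increments, so once the ring
 * has wrapped (log->full), printing starts at the oldest slot and
 * covers the whole array; before that it runs from slot 0 up to the
 * current cursor.
 */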
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

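/*
 * File operations for the /dev/binder* character devices. The binder
 * ioctl ABI uses fixed-size types, so the native handler can also
 * serve 32-bit compat ioctls directly.
 */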
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

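/*
 * BINDER_DEBUG_ENTRY(name), defined earlier in this file, generates
 * the single_open() boilerplate and the binder_<name>_fops structures
 * that binder_init() below hands to debugfs_create_file().
 */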
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

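/*
 * Allocate and register one binder misc character device (dynamic
 * minor) under the given name and add it to the global binder_devices
 * list. On misc_register() failure the allocation is freed here;
 * devices already registered are unwound by binder_init()'s error
 * path.
 */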
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

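/*
 * Driver init: the log cursors start at ~0U so that the first
 * atomic_inc_return() lands on slot 0; the debugfs tree is optional
 * and a failure to create it is ignored. The device name tokens keep
 * pointing into the copied device_names buffer (miscdev.name is not
 * duplicated), which is why that buffer is never freed on success.
 */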
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");