/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo), as well as
 *    thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
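
/*
 * Example (illustrative sketch only, not called anywhere): an
 * operation that touches a ref, node fields, and a todo list would
 * take the lock helpers defined later in this file in the order
 * documented above and release them in reverse:
 *
 *      binder_proc_lock(proc);         // 1) proc->outer_lock
 *      binder_node_lock(node);         // 2) node->lock
 *      binder_inner_proc_lock(proc);   // 3) proc->inner_lock
 *      ...
 *      binder_inner_proc_unlock(proc);
 *      binder_node_unlock(node);
 *      binder_proc_unlock(proc);
 */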

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
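
/*
 * For reference, BINDER_DEBUG_ENTRY(proc) above expands (roughly) to:
 *
 *      static int binder_proc_open(struct inode *inode, struct file *file)
 *      {
 *              return single_open(file, binder_proc_show, inode->i_private);
 *      }
 *
 *      static const struct file_operations binder_proc_fops = { ... };
 *
 * so each debugfs file only needs to supply a binder_<name>_show()
 * seq_file callback.
 */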

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
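
/*
 * Typical usage of the two macros above (illustrative; proc and
 * handle are placeholders): the message is printed only when the
 * matching bit is set in binder_debug_mask, and binder_user_error()
 * additionally arms the stop_on_user_error machinery:
 *
 *      binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *      binder_user_error("%d: invalid handle %d\n", proc->pid, handle);
 */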

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
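
/*
 * Sketch of the matching consumer (assumed; the actual reader lives
 * with the debugfs code later in this file): it snapshots
 * debug_id_done, pairs the smp_wmb() above with smp_rmb(), and
 * re-checks debug_id_done to detect an entry that was reused while
 * being printed:
 *
 *      int debug_id = READ_ONCE(e->debug_id_done);
 *
 *      smp_rmb();
 *      ...print the entry fields...
 *      if (debug_id == 0 || debug_id != READ_ONCE(e->debug_id_done))
 *              ...report the entry as incomplete...
 */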

struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};
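
/*
 * Illustrative consumption pattern: a binder_work is always embedded
 * in its owning structure, so consumers dispatch on @type and recover
 * the container with container_of(), e.g.:
 *
 *      switch (w->type) {
 *      case BINDER_WORK_NODE: {
 *              struct binder_node *node =
 *                      container_of(w, struct binder_node, work);
 *              ...
 *              break;
 *      }
 *      ...
 *      }
 */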

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES = 0x01,
        BINDER_DEFERRED_FLUSH     = 0x02,
        BINDER_DEFERRED_RELEASE   = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @ready_threads:        number of threads waiting for proc work
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        wait_queue_head_t wait;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int ready_threads;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        long priority;
        long saved_priority;
        kuid_t sender_euid;
        /**
         * @lock: protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
                    struct binder_work *work,
                    struct list_head *target_list)
{
        binder_inner_proc_lock(proc);
        binder_enqueue_work_ilocked(work, target_list);
        binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}
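
/*
 * Illustrative use of the worklist helpers above (handle() is a
 * placeholder): producers queue embedded work items and consumers
 * drain them, with all list manipulation serialized by the proc's
 * inner lock:
 *
 *      binder_enqueue_work(proc, &node->work, &proc->todo);
 *      ...
 *      while ((w = binder_dequeue_work_head(proc, &proc->todo)))
 *              handle(w);
 */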

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        struct files_struct *files = proc->files;
        unsigned long rlim_cur;
        unsigned long irqs;

        if (files == NULL)
                return -ESRCH;

        if (!lock_task_sighand(proc->tsk, &irqs))
                return -EMFILE;

        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        if (proc->files)
                __fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        if (proc->files == NULL)
                return -ESRCH;

        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;

        return retval;
}

static inline void binder_lock(const char *tag)
{
        trace_binder_lock(tag);
        mutex_lock(&binder_main_lock);
        trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
        trace_binder_unlock(tag);
        mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                     current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        BUG_ON(!spin_is_locked(&proc->inner_lock));

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}

static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        BUG_ON(!spin_is_locked(&proc->inner_lock));
        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        BUG_ON(!spin_is_locked(&node->lock));
        if (proc)
                BUG_ON(!spin_is_locked(&proc->inner_lock));
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->
                                      binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        binder_dequeue_work_ilocked(&node->work);
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        BUG_ON(!spin_is_locked(&node->lock));
        if (proc)
                BUG_ON(!spin_is_locked(&proc->inner_lock));
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                    !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        wake_up_interruptible(&node->proc->wait);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node_nilocked() to check if all refcounts are
         * 0 and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in binder_dec_node().
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}
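
/*
 * Illustrative pairing: any lookup that returns a node holding a
 * temporary reference must be matched by binder_put_node() once the
 * local pointer is no longer used:
 *
 *      struct binder_node *node = binder_get_node(proc, ptr);
 *
 *      if (node) {
 *              ...use node...
 *              binder_put_node(node);
 *      }
 */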

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
                                                 u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:       binder_proc that owns the ref
 * @node:       binder_node of target
 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different than the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
                                        struct binder_proc *proc,
                                        struct binder_node *node,
                                        struct binder_ref *new_ref)
{
        struct binder_context *context = proc->context;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref;
        struct rb_node *n;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        if (!new_ref)
                return NULL;

        binder_stats_created(BINDER_STAT_REF);
        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->data.desc > new_ref->data.desc)
                        break;
                new_ref->data.desc = ref->data.desc + 1;
        }

        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->data.desc < ref->data.desc)
                        p = &(*p)->rb_left;
                else if (new_ref->data.desc > ref->data.desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

        binder_node_lock(node);
        hlist_add_head(&new_ref->node_entry, &node->refs);

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d new ref %d desc %d for node %d\n",
                     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
                     node->debug_id);
        binder_node_unlock(node);
        return new_ref;
}
1380
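/*
 * Illustrative sketch, not part of the driver: the allocate-outside-
 * the-lock pattern described in the kernel-doc above; if another
 * thread wins the race, the unused allocation is freed. A hypothetical
 * helper returning the ref's descriptor (0 on allocation failure).
 */
static u32 binder_example_get_or_alloc_desc(struct binder_proc *proc,
					    struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	u32 desc;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
		if (!new_ref)
			return 0;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	desc = ref->data.desc;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		kfree(new_ref);	/* another thread initialized the ref first */
	return desc;
}
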
Todd Kjos5346bf32016-10-20 16:43:34 -07001381static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001382{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001383 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001384
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001385 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301386 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001387 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301388 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001389
1390 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1391 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001392
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001393 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001394 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001395 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001396
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001397 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001398 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1399 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001400 /*
1401 * Clear ref->node unless we want the caller to free the node
1402 */
1403 if (!delete_node) {
1404 /*
1405 * The caller uses ref->node to determine
1406 * whether the node needs to be freed. Clear
1407 * it since the node is still alive.
1408 */
1409 ref->node = NULL;
1410 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001411
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001412 if (ref->death) {
1413 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301414 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001415 ref->proc->pid, ref->data.debug_id,
1416 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001417 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001418 binder_stats_deleted(BINDER_STAT_DEATH);
1419 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001420 binder_stats_deleted(BINDER_STAT_REF);
1421}
1422
Todd Kjosb0117bb2017-05-08 09:16:27 -07001423/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001424 * binder_inc_ref_olocked() - increment the ref for given handle
Todd Kjosb0117bb2017-05-08 09:16:27 -07001425 * @ref: ref to be incremented
1426 * @strong: if true, strong increment, else weak
1427 * @target_list: list to queue node work on
1428 *
Todd Kjos5346bf32016-10-20 16:43:34 -07001429 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjosb0117bb2017-05-08 09:16:27 -07001430 *
1431 * Return: 0, if successful, else errno
1432 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001433static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1434 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001435{
1436 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001437
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001438 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001439 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001440 ret = binder_inc_node(ref->node, 1, 1, target_list);
1441 if (ret)
1442 return ret;
1443 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001444 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001445 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001446 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001447 ret = binder_inc_node(ref->node, 0, 1, target_list);
1448 if (ret)
1449 return ret;
1450 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001451 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001452 }
1453 return 0;
1454}
1455
Todd Kjosb0117bb2017-05-08 09:16:27 -07001456/**
1457 * binder_dec_ref_olocked() - dec the given ref
1458 * @ref: ref to be decremented
1459 * @strong: if true, strong decrement, else weak
1460 *
1461 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1462 *
Todd Kjosb0117bb2017-05-08 09:16:27 -07001463 * Return: true if ref is cleaned up and ready to be freed
1464 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001465static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001466{
1467 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001468 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301469 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001470 ref->proc->pid, ref->data.debug_id,
1471 ref->data.desc, ref->data.strong,
1472 ref->data.weak);
1473 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001474 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001475 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001476 if (ref->data.strong == 0)
1477 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001478 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001479 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301480 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001481 ref->proc->pid, ref->data.debug_id,
1482 ref->data.desc, ref->data.strong,
1483 ref->data.weak);
1484 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001485 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001486 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001487 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001488 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001489 binder_cleanup_ref_olocked(ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001490 return true;
1491 }
1492 return false;
1493}
1494
1495/**
1496 * binder_get_node_from_ref() - get the node from the given proc/desc
1497 * @proc: proc containing the ref
1498 * @desc: the handle associated with the ref
1499 * @need_strong_ref: if true, only return node if ref is strong
1500 * @rdata: the id/refcount data for the ref
1501 *
1502 * Given a proc and ref handle, return the associated binder_node
1503 *
1504 * Return: the binder_node, or NULL if not found or if the ref is weak but a strong ref was required
1505 */
1506static struct binder_node *binder_get_node_from_ref(
1507 struct binder_proc *proc,
1508 u32 desc, bool need_strong_ref,
1509 struct binder_ref_data *rdata)
1510{
1511 struct binder_node *node;
1512 struct binder_ref *ref;
1513
Todd Kjos5346bf32016-10-20 16:43:34 -07001514 binder_proc_lock(proc);
1515 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001516 if (!ref)
1517 goto err_no_ref;
1518 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001519 /*
1520 * Take an implicit reference on the node to ensure
1521 * it stays alive until the call to binder_put_node()
1522 */
1523 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001524 if (rdata)
1525 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001526 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001527
1528 return node;
1529
1530err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001531 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001532 return NULL;
1533}
1534
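/*
 * Illustrative sketch, not part of the driver: every successful
 * binder_get_node_from_ref() must be paired with binder_put_node() to
 * drop the implicit tmpref. Hypothetical helper.
 */
static int binder_example_node_debug_id(struct binder_proc *proc, u32 desc)
{
	struct binder_ref_data rdata;
	struct binder_node *node;
	int debug_id;

	node = binder_get_node_from_ref(proc, desc, false, &rdata);
	if (!node)
		return -EINVAL;
	debug_id = node->debug_id;	/* node pinned by the tmpref */
	binder_put_node(node);
	return debug_id;
}
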
1535/**
1536 * binder_free_ref() - free the binder_ref
1537 * @ref: ref to free
1538 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001539 * Free the binder_ref. Free the binder_node indicated by ref->node
1540 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001541 */
1542static void binder_free_ref(struct binder_ref *ref)
1543{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001544 if (ref->node)
1545 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001546 kfree(ref->death);
1547 kfree(ref);
1548}
1549
1550/**
1551 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1552 * @proc: proc containing the ref
1553 * @desc: the handle associated with the ref
1554 * @increment: true=inc reference, false=dec reference
1555 * @strong: true=strong reference, false=weak reference
1556 * @rdata: the id/refcount data for the ref
1557 *
1558 * Given a proc and ref handle, increment or decrement the ref
1559 * according to "increment" arg.
1560 *
1561 * Return: 0 if successful, else errno
1562 */
1563static int binder_update_ref_for_handle(struct binder_proc *proc,
1564 uint32_t desc, bool increment, bool strong,
1565 struct binder_ref_data *rdata)
1566{
1567 int ret = 0;
1568 struct binder_ref *ref;
1569 bool delete_ref = false;
1570
Todd Kjos5346bf32016-10-20 16:43:34 -07001571 binder_proc_lock(proc);
1572 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001573 if (!ref) {
1574 ret = -EINVAL;
1575 goto err_no_ref;
1576 }
1577 if (increment)
Todd Kjos5346bf32016-10-20 16:43:34 -07001578 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001579 else
Todd Kjos5346bf32016-10-20 16:43:34 -07001580 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001581
1582 if (rdata)
1583 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001584 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001585
1586 if (delete_ref)
1587 binder_free_ref(ref);
1588 return ret;
1589
1590err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001591 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001592 return ret;
1593}
1594
1595/**
1596 * binder_dec_ref_for_handle() - dec the ref for given handle
1597 * @proc: proc containing the ref
1598 * @desc: the handle associated with the ref
1599 * @strong: true=strong reference, false=weak reference
1600 * @rdata: the id/refcount data for the ref
1601 *
1602 * Just calls binder_update_ref_for_handle() to decrement the ref.
1603 *
1604 * Return: 0 if successful, else errno
1605 */
1606static int binder_dec_ref_for_handle(struct binder_proc *proc,
1607 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1608{
1609 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1610}
1611
1612
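/*
 * Illustrative sketch, not part of the driver: a weak increment on a
 * handle, balanced by binder_dec_ref_for_handle(). The descriptor and
 * error handling are hypothetical.
 */
static int binder_example_weak_inc_dec(struct binder_proc *proc, u32 desc)
{
	struct binder_ref_data rdata;
	int ret;

	ret = binder_update_ref_for_handle(proc, desc, true, false, &rdata);
	if (ret)
		return ret;
	/* ... the ref is guaranteed to stay alive here ... */
	return binder_dec_ref_for_handle(proc, desc, false, &rdata);
}
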
1613/**
1614 * binder_inc_ref_for_node() - increment the ref for given proc/node
1615 * @proc: proc containing the ref
1616 * @node: target node
1617 * @strong: true=strong reference, false=weak reference
1618 * @target_list: worklist to use if node is incremented
1619 * @rdata: the id/refcount data for the ref
1620 *
1621 * Given a proc and node, increment the ref. Create the ref if it
1622 * doesn't already exist
1623 *
1624 * Return: 0 if successful, else errno
1625 */
1626static int binder_inc_ref_for_node(struct binder_proc *proc,
1627 struct binder_node *node,
1628 bool strong,
1629 struct list_head *target_list,
1630 struct binder_ref_data *rdata)
1631{
1632 struct binder_ref *ref;
1633 struct binder_ref *new_ref = NULL;
1634 int ret = 0;
1635
Todd Kjos5346bf32016-10-20 16:43:34 -07001636 binder_proc_lock(proc);
1637 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001638 if (!ref) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001639 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001640 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1641 if (!new_ref)
1642 return -ENOMEM;
Todd Kjos5346bf32016-10-20 16:43:34 -07001643 binder_proc_lock(proc);
1644 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001645 }
Todd Kjos5346bf32016-10-20 16:43:34 -07001646 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001647 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001648 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001649 if (new_ref && ref != new_ref)
1650 /*
1651 * Another thread created the ref first so
1652 * free the one we allocated
1653 */
1654 kfree(new_ref);
1655 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001656}
1657
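/*
 * Illustrative sketch, not part of the driver: taking a weak ref on a
 * node on behalf of target_proc and reporting the new descriptor
 * through @descp; released later via binder_dec_ref_for_handle().
 * Hypothetical helper.
 */
static int binder_example_take_weak_ref(struct binder_proc *target_proc,
					struct binder_node *node, u32 *descp)
{
	struct binder_ref_data rdata;
	int ret;

	ret = binder_inc_ref_for_node(target_proc, node, false, NULL, &rdata);
	if (ret)
		return ret;
	*descp = rdata.desc;
	return 0;
}
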
Martijn Coenen995a36e2017-06-02 13:36:52 -07001658static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1659 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001660{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001661 BUG_ON(!target_thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07001662 BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
Todd Kjos21ef40a2017-03-30 18:02:13 -07001663 BUG_ON(target_thread->transaction_stack != t);
1664 BUG_ON(target_thread->transaction_stack->from != target_thread);
1665 target_thread->transaction_stack =
1666 target_thread->transaction_stack->from_parent;
1667 t->from = NULL;
1668}
1669
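/*
 * Illustrative sketch, not part of the driver: per the locking rules,
 * the _ilocked suffix means the pop must run under the target proc's
 * inner lock. Hypothetical wrapper.
 */
static void binder_example_pop_transaction(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	binder_inner_proc_lock(target_thread->proc);
	binder_pop_transaction_ilocked(target_thread, t);
	binder_inner_proc_unlock(target_thread->proc);
}
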
Todd Kjos2f993e22017-05-12 14:42:55 -07001670/**
1671 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1672 * @thread: thread to decrement
1673 *
1674 * A thread needs to be kept alive while being used to create or
1675 * handle a transaction. binder_get_txn_from() is used to safely
1676 * extract t->from from a binder_transaction and keep the thread
1677 * indicated by t->from from being freed. When done with that
1678 * binder_thread, this function is called to decrement the
1679 * tmp_ref and free if appropriate (thread has been released
1680 * and no transaction being processed by the driver)
1681 */
1682static void binder_thread_dec_tmpref(struct binder_thread *thread)
1683{
1684 /*
1685 * The atomic protects the counter value; the inner lock is held
1686 * across the decrement and is_dead check to prevent a racing free
Todd Kjos2f993e22017-05-12 14:42:55 -07001687 */
Todd Kjosb4827902017-05-25 15:52:17 -07001688 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001689 atomic_dec(&thread->tmp_ref);
1690 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07001691 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001692 binder_free_thread(thread);
1693 return;
1694 }
Todd Kjosb4827902017-05-25 15:52:17 -07001695 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001696}
1697
1698/**
1699 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1700 * @proc: proc to decrement
1701 *
1702 * A binder_proc needs to be kept alive while being used to create or
1703 * handle a transaction. proc->tmp_ref is incremented when
1704 * creating a new transaction or the binder_proc is currently in-use
1705 * by threads that are being released. When done with the binder_proc,
1706 * this function is called to decrement the counter and free the
1707 * proc if appropriate (proc has been released, all threads have
1708 * been released and not currently in use to process a transaction).
1709 */
1710static void binder_proc_dec_tmpref(struct binder_proc *proc)
1711{
Todd Kjosb4827902017-05-25 15:52:17 -07001712 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001713 proc->tmp_ref--;
1714 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1715 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07001716 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001717 binder_free_proc(proc);
1718 return;
1719 }
Todd Kjosb4827902017-05-25 15:52:17 -07001720 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001721}
1722
1723/**
1724 * binder_get_txn_from() - safely extract the "from" thread in transaction
1725 * @t: binder transaction for t->from
1726 *
1727 * Atomically return the "from" thread and increment the tmp_ref
1728 * count for the thread to ensure it stays alive until
1729 * binder_thread_dec_tmpref() is called.
1730 *
1731 * Return: the value of t->from
1732 */
1733static struct binder_thread *binder_get_txn_from(
1734 struct binder_transaction *t)
1735{
1736 struct binder_thread *from;
1737
1738 spin_lock(&t->lock);
1739 from = t->from;
1740 if (from)
1741 atomic_inc(&from->tmp_ref);
1742 spin_unlock(&t->lock);
1743 return from;
1744}
1745
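/*
 * Illustrative sketch, not part of the driver: the tmp_ref taken by
 * binder_get_txn_from() must always be dropped again with
 * binder_thread_dec_tmpref(). Hypothetical helper.
 */
static pid_t binder_example_txn_sender_pid(struct binder_transaction *t)
{
	struct binder_thread *from;
	pid_t pid;

	from = binder_get_txn_from(t);
	if (!from)
		return -1;
	pid = from->pid;
	binder_thread_dec_tmpref(from);
	return pid;
}
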
Martijn Coenen995a36e2017-06-02 13:36:52 -07001746/**
1747 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1748 * @t: binder transaction for t->from
1749 *
1750 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1751 * to guarantee that the thread cannot be released while operating on it.
1752 * The caller must call binder_inner_proc_unlock() to release the inner lock
1753 * as well as call binder_thread_dec_tmpref() to release the reference.
1754 *
1755 * Return: the value of t->from
1756 */
1757static struct binder_thread *binder_get_txn_from_and_acq_inner(
1758 struct binder_transaction *t)
1759{
1760 struct binder_thread *from;
1761
1762 from = binder_get_txn_from(t);
1763 if (!from)
1764 return NULL;
1765 binder_inner_proc_lock(from->proc);
1766 if (t->from) {
1767 BUG_ON(from != t->from);
1768 return from;
1769 }
1770 binder_inner_proc_unlock(from->proc);
1771 binder_thread_dec_tmpref(from);
1772 return NULL;
1773}
1774
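/*
 * Illustrative sketch, not part of the driver: the shape of a
 * binder_get_txn_from_and_acq_inner() section; both the inner lock and
 * the tmpref must be released on every path, as in
 * binder_send_failed_reply() below. Hypothetical helper.
 */
static bool binder_example_txn_has_sender(struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from_and_acq_inner(t);
	if (!from)
		return false;
	/* from cannot be released while the inner lock is held */
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return true;
}
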
Todd Kjos21ef40a2017-03-30 18:02:13 -07001775static void binder_free_transaction(struct binder_transaction *t)
1776{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001777 if (t->buffer)
1778 t->buffer->transaction = NULL;
1779 kfree(t);
1780 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1781}
1782
1783static void binder_send_failed_reply(struct binder_transaction *t,
1784 uint32_t error_code)
1785{
1786 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001787 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09001788
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001789 BUG_ON(t->flags & TF_ONE_WAY);
1790 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07001791 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001792 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07001793 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1794 "send failed reply for transaction %d to %d:%d\n",
1795 t->debug_id,
1796 target_thread->proc->pid,
1797 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001798
Martijn Coenen995a36e2017-06-02 13:36:52 -07001799 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07001800 if (target_thread->reply_error.cmd == BR_OK) {
1801 target_thread->reply_error.cmd = error_code;
Martijn Coenen995a36e2017-06-02 13:36:52 -07001802 binder_enqueue_work_ilocked(
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001803 &target_thread->reply_error.work,
Todd Kjos858b8da2017-04-21 17:35:12 -07001804 &target_thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001805 wake_up_interruptible(&target_thread->wait);
1806 } else {
Todd Kjos858b8da2017-04-21 17:35:12 -07001807 WARN(1, "Unexpected reply error: %u\n",
1808 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001809 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07001810 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001811 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07001812 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001813 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001814 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001815 next = t->from_parent;
1816
1817 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1818 "send failed reply for transaction %d, target dead\n",
1819 t->debug_id);
1820
Todd Kjos21ef40a2017-03-30 18:02:13 -07001821 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001822 if (next == NULL) {
1823 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1824 "reply failed, no target thread at root\n");
1825 return;
1826 }
1827 t = next;
1828 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1829 "reply failed, no target thread -- retry %d\n",
1830 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001831 }
1832}
1833
Martijn Coenen00c80372016-07-13 12:06:49 +02001834/**
1835 * binder_validate_object() - checks for a valid metadata object in a buffer.
1836 * @buffer: binder_buffer that we're parsing.
1837 * @offset: offset in the buffer at which to validate an object.
1838 *
1839 * Return: If there's a valid metadata object at @offset in @buffer, the
1840 * size of that object. Otherwise, it returns zero.
1841 */
1842static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1843{
1844 /* Check if we can read a header first */
1845 struct binder_object_header *hdr;
1846 size_t object_size = 0;
1847
1848 if (offset > buffer->data_size - sizeof(*hdr) ||
1849 buffer->data_size < sizeof(*hdr) ||
1850 !IS_ALIGNED(offset, sizeof(u32)))
1851 return 0;
1852
1853 /* Ok, now see if we can read a complete object. */
1854 hdr = (struct binder_object_header *)(buffer->data + offset);
1855 switch (hdr->type) {
1856 case BINDER_TYPE_BINDER:
1857 case BINDER_TYPE_WEAK_BINDER:
1858 case BINDER_TYPE_HANDLE:
1859 case BINDER_TYPE_WEAK_HANDLE:
1860 object_size = sizeof(struct flat_binder_object);
1861 break;
1862 case BINDER_TYPE_FD:
1863 object_size = sizeof(struct binder_fd_object);
1864 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02001865 case BINDER_TYPE_PTR:
1866 object_size = sizeof(struct binder_buffer_object);
1867 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02001868 case BINDER_TYPE_FDA:
1869 object_size = sizeof(struct binder_fd_array_object);
1870 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02001871 default:
1872 return 0;
1873 }
1874 if (offset <= buffer->data_size - object_size &&
1875 buffer->data_size >= object_size)
1876 return object_size;
1877 else
1878 return 0;
1879}
1880
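/*
 * Illustrative sketch, not part of the driver: walking a buffer's
 * offset array and validating each object header, mirroring the loop
 * in binder_transaction_buffer_release() below. Hypothetical helper.
 */
static bool binder_example_buffer_objects_valid(struct binder_buffer *buffer)
{
	binder_size_t *offp, *off_start, *off_end;

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++)
		if (binder_validate_object(buffer, *offp) == 0)
			return false;
	return true;
}
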
Martijn Coenen5a6da532016-09-30 14:10:07 +02001881/**
1882 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1883 * @b: binder_buffer containing the object
1884 * @index: index in offset array at which the binder_buffer_object is
1885 * located
1886 * @start: points to the start of the offset array
1887 * @num_valid: the number of valid offsets in the offset array
1888 *
1889 * Return: If @index is within the valid range of the offset array
1890 * described by @start and @num_valid, and if there's a valid
1891 * binder_buffer_object at the offset found in index @index
1892 * of the offset array, that object is returned. Otherwise,
1893 * %NULL is returned.
1894 * Note that the offset found in index @index itself is not
1895 * verified; this function assumes that @num_valid elements
1896 * from @start were previously verified to have valid offsets.
1897 */
1898static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1899 binder_size_t index,
1900 binder_size_t *start,
1901 binder_size_t num_valid)
1902{
1903 struct binder_buffer_object *buffer_obj;
1904 binder_size_t *offp;
1905
1906 if (index >= num_valid)
1907 return NULL;
1908
1909 offp = start + index;
1910 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1911 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1912 return NULL;
1913
1914 return buffer_obj;
1915}
1916
1917/**
1918 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1919 * @b: transaction buffer
1920 * @objects_start: start of objects buffer
1921 * @buffer: binder_buffer_object in which to fix up
1922 * @offset: start offset in @buffer to fix up
1923 * @last_obj: last binder_buffer_object that we fixed up in
1924 * @last_min_offset: minimum fixup offset in @last_obj
1925 *
1926 * Return: %true if a fixup in buffer @buffer at offset @offset is
1927 * allowed.
1928 *
1929 * For safety reasons, we only allow fixups inside a buffer to happen
1930 * at increasing offsets; additionally, we only allow fixup on the last
1931 * buffer object that was verified, or one of its parents.
1932 *
1933 * Example of what is allowed:
1934 *
1935 * A
1936 *   B (parent = A, offset = 0)
1937 *   C (parent = A, offset = 16)
1938 *     D (parent = C, offset = 0)
1939 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1940 *
1941 * Examples of what is not allowed:
1942 *
1943 * Decreasing offsets within the same parent:
1944 * A
1945 *   C (parent = A, offset = 16)
1946 *   B (parent = A, offset = 0) // decreasing offset within A
1947 *
1948 * Referring to a parent that wasn't the last object or any of its parents:
1949 * A
1950 *   B (parent = A, offset = 0)
1951 *   C (parent = A, offset = 0)
1952 *   C (parent = A, offset = 16)
1953 *   D (parent = B, offset = 0) // B is not A or any of A's parents
1954 */
1955static bool binder_validate_fixup(struct binder_buffer *b,
1956 binder_size_t *objects_start,
1957 struct binder_buffer_object *buffer,
1958 binder_size_t fixup_offset,
1959 struct binder_buffer_object *last_obj,
1960 binder_size_t last_min_offset)
1961{
1962 if (!last_obj) {
1963		/* No object has been verified yet, so no fixup is allowed */
1964 return false;
1965 }
1966
1967 while (last_obj != buffer) {
1968 /*
1969 * Safe to retrieve the parent of last_obj, since it
1970 * was already previously verified by the driver.
1971 */
1972 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1973 return false;
1974 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1975 last_obj = (struct binder_buffer_object *)
1976 (b->data + *(objects_start + last_obj->parent));
1977 }
1978 return (fixup_offset >= last_min_offset);
1979}
1980
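/*
 * Illustrative sketch, not part of the driver: when @last_obj is the
 * same object being fixed up, binder_validate_fixup() reduces to the
 * in-order check fixup_offset >= last_min_offset. Hypothetical helper.
 */
static bool binder_example_fixup_in_order(struct binder_buffer *b,
					  binder_size_t *objects_start,
					  struct binder_buffer_object *bp,
					  binder_size_t fixup_offset,
					  binder_size_t last_min_offset)
{
	return binder_validate_fixup(b, objects_start, bp, fixup_offset,
				     bp, last_min_offset);
}
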
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001981static void binder_transaction_buffer_release(struct binder_proc *proc,
1982 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001983 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001984{
Martijn Coenen5a6da532016-09-30 14:10:07 +02001985 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001986 int debug_id = buffer->debug_id;
1987
1988 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301989 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001990 proc->pid, buffer->debug_id,
1991 buffer->data_size, buffer->offsets_size, failed_at);
1992
1993 if (buffer->target_node)
1994 binder_dec_node(buffer->target_node, 1, 0);
1995
Martijn Coenen5a6da532016-09-30 14:10:07 +02001996 off_start = (binder_size_t *)(buffer->data +
1997 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001998 if (failed_at)
1999 off_end = failed_at;
2000 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02002001 off_end = (void *)off_start + buffer->offsets_size;
2002 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002003 struct binder_object_header *hdr;
2004 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002005
Martijn Coenen00c80372016-07-13 12:06:49 +02002006 if (object_size == 0) {
2007 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002008 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002009 continue;
2010 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002011 hdr = (struct binder_object_header *)(buffer->data + *offp);
2012 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002013 case BINDER_TYPE_BINDER:
2014 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002015 struct flat_binder_object *fp;
2016 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002017
Martijn Coenen00c80372016-07-13 12:06:49 +02002018 fp = to_flat_binder_object(hdr);
2019 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002020 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002021 pr_err("transaction release %d bad node %016llx\n",
2022 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002023 break;
2024 }
2025 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002026 " node %d u%016llx\n",
2027 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002028 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2029 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002030 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002031 } break;
2032 case BINDER_TYPE_HANDLE:
2033 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002034 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002035 struct binder_ref_data rdata;
2036 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002037
Martijn Coenen00c80372016-07-13 12:06:49 +02002038 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002039 ret = binder_dec_ref_for_handle(proc, fp->handle,
2040 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2041
2042 if (ret) {
2043 pr_err("transaction release %d bad handle %d, ret = %d\n",
2044 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002045 break;
2046 }
2047 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002048 " ref %d desc %d\n",
2049 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002050 } break;
2051
Martijn Coenen00c80372016-07-13 12:06:49 +02002052 case BINDER_TYPE_FD: {
2053 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2054
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002055 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002056 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002057 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002058 task_close_fd(proc, fp->fd);
2059 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002060 case BINDER_TYPE_PTR:
2061 /*
2062 * Nothing to do here, this will get cleaned up when the
2063 * transaction buffer gets freed
2064 */
2065 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002066 case BINDER_TYPE_FDA: {
2067 struct binder_fd_array_object *fda;
2068 struct binder_buffer_object *parent;
2069 uintptr_t parent_buffer;
2070 u32 *fd_array;
2071 size_t fd_index;
2072 binder_size_t fd_buf_size;
2073
2074 fda = to_binder_fd_array_object(hdr);
2075 parent = binder_validate_ptr(buffer, fda->parent,
2076 off_start,
2077 offp - off_start);
2078 if (!parent) {
2079			pr_err("transaction release %d bad parent offset\n",
2080 debug_id);
2081 continue;
2082 }
2083 /*
2084 * Since the parent was already fixed up, convert it
2085 * back to kernel address space to access it
2086 */
2087 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002088 binder_alloc_get_user_buffer_offset(
2089 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002090
2091 fd_buf_size = sizeof(u32) * fda->num_fds;
2092 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2093 pr_err("transaction release %d invalid number of fds (%lld)\n",
2094 debug_id, (u64)fda->num_fds);
2095 continue;
2096 }
2097 if (fd_buf_size > parent->length ||
2098 fda->parent_offset > parent->length - fd_buf_size) {
2099 /* No space for all file descriptors here. */
2100 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2101 debug_id, (u64)fda->num_fds);
2102 continue;
2103 }
2104 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2105 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2106 task_close_fd(proc, fd_array[fd_index]);
2107 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002108 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002109 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002110 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002111 break;
2112 }
2113 }
2114}
2115
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002116static int binder_translate_binder(struct flat_binder_object *fp,
2117 struct binder_transaction *t,
2118 struct binder_thread *thread)
2119{
2120 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002121 struct binder_proc *proc = thread->proc;
2122 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002123 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002124 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002125
2126 node = binder_get_node(proc, fp->binder);
2127 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002128 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002129 if (!node)
2130 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002131 }
2132 if (fp->cookie != node->cookie) {
2133 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2134 proc->pid, thread->pid, (u64)fp->binder,
2135 node->debug_id, (u64)fp->cookie,
2136 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002137 ret = -EINVAL;
2138 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002139 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002140 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2141 ret = -EPERM;
2142 goto done;
2143 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002144
Todd Kjosb0117bb2017-05-08 09:16:27 -07002145 ret = binder_inc_ref_for_node(target_proc, node,
2146 fp->hdr.type == BINDER_TYPE_BINDER,
2147 &thread->todo, &rdata);
2148 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002149 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002150
2151 if (fp->hdr.type == BINDER_TYPE_BINDER)
2152 fp->hdr.type = BINDER_TYPE_HANDLE;
2153 else
2154 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2155 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002156 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002157 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002158
Todd Kjosb0117bb2017-05-08 09:16:27 -07002159 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002160 binder_debug(BINDER_DEBUG_TRANSACTION,
2161 " node %d u%016llx -> ref %d desc %d\n",
2162 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002163 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002164done:
2165 binder_put_node(node);
2166 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002167}
2168
2169static int binder_translate_handle(struct flat_binder_object *fp,
2170 struct binder_transaction *t,
2171 struct binder_thread *thread)
2172{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002173 struct binder_proc *proc = thread->proc;
2174 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002175 struct binder_node *node;
2176 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002177 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002178
Todd Kjosb0117bb2017-05-08 09:16:27 -07002179 node = binder_get_node_from_ref(proc, fp->handle,
2180 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2181 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002182 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2183 proc->pid, thread->pid, fp->handle);
2184 return -EINVAL;
2185 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002186 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2187 ret = -EPERM;
2188 goto done;
2189 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002190
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002191 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002192 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002193 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2194 fp->hdr.type = BINDER_TYPE_BINDER;
2195 else
2196 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002197 fp->binder = node->ptr;
2198 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002199 if (node->proc)
2200 binder_inner_proc_lock(node->proc);
2201 binder_inc_node_nilocked(node,
2202 fp->hdr.type == BINDER_TYPE_BINDER,
2203 0, NULL);
2204 if (node->proc)
2205 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002206 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002207 binder_debug(BINDER_DEBUG_TRANSACTION,
2208 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002209 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2210 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002211 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002212 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002213 int ret;
2214 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002215
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002216 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002217 ret = binder_inc_ref_for_node(target_proc, node,
2218 fp->hdr.type == BINDER_TYPE_HANDLE,
2219 NULL, &dest_rdata);
2220 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002221 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002222
2223 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002224 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002225 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002226 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2227 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002228 binder_debug(BINDER_DEBUG_TRANSACTION,
2229 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002230 src_rdata.debug_id, src_rdata.desc,
2231 dest_rdata.debug_id, dest_rdata.desc,
2232 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002233 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002234done:
2235 binder_put_node(node);
2236 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002237}
2238
2239static int binder_translate_fd(int fd,
2240 struct binder_transaction *t,
2241 struct binder_thread *thread,
2242 struct binder_transaction *in_reply_to)
2243{
2244 struct binder_proc *proc = thread->proc;
2245 struct binder_proc *target_proc = t->to_proc;
2246 int target_fd;
2247 struct file *file;
2248 int ret;
2249 bool target_allows_fd;
2250
2251 if (in_reply_to)
2252 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2253 else
2254 target_allows_fd = t->buffer->target_node->accept_fds;
2255 if (!target_allows_fd) {
2256 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2257 proc->pid, thread->pid,
2258 in_reply_to ? "reply" : "transaction",
2259 fd);
2260 ret = -EPERM;
2261 goto err_fd_not_accepted;
2262 }
2263
2264 file = fget(fd);
2265 if (!file) {
2266 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2267 proc->pid, thread->pid, fd);
2268 ret = -EBADF;
2269 goto err_fget;
2270 }
2271 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2272 if (ret < 0) {
2273 ret = -EPERM;
2274 goto err_security;
2275 }
2276
2277 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2278 if (target_fd < 0) {
2279 ret = -ENOMEM;
2280 goto err_get_unused_fd;
2281 }
2282 task_fd_install(target_proc, target_fd, file);
2283 trace_binder_transaction_fd(t, fd, target_fd);
2284 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2285 fd, target_fd);
2286
2287 return target_fd;
2288
2289err_get_unused_fd:
2290err_security:
2291 fput(file);
2292err_fget:
2293err_fd_not_accepted:
2294 return ret;
2295}
2296
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002297static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2298 struct binder_buffer_object *parent,
2299 struct binder_transaction *t,
2300 struct binder_thread *thread,
2301 struct binder_transaction *in_reply_to)
2302{
2303 binder_size_t fdi, fd_buf_size, num_installed_fds;
2304 int target_fd;
2305 uintptr_t parent_buffer;
2306 u32 *fd_array;
2307 struct binder_proc *proc = thread->proc;
2308 struct binder_proc *target_proc = t->to_proc;
2309
2310 fd_buf_size = sizeof(u32) * fda->num_fds;
2311 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2312 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2313 proc->pid, thread->pid, (u64)fda->num_fds);
2314 return -EINVAL;
2315 }
2316 if (fd_buf_size > parent->length ||
2317 fda->parent_offset > parent->length - fd_buf_size) {
2318 /* No space for all file descriptors here. */
2319 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2320 proc->pid, thread->pid, (u64)fda->num_fds);
2321 return -EINVAL;
2322 }
2323 /*
2324 * Since the parent was already fixed up, convert it
2325 * back to the kernel address space to access it
2326 */
Todd Kjosd325d372016-10-10 10:40:53 -07002327 parent_buffer = parent->buffer -
2328 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002329 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2330 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2331 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2332 proc->pid, thread->pid);
2333 return -EINVAL;
2334 }
2335 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2336 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2337 in_reply_to);
2338 if (target_fd < 0)
2339 goto err_translate_fd_failed;
2340 fd_array[fdi] = target_fd;
2341 }
2342 return 0;
2343
2344err_translate_fd_failed:
2345 /*
2346 * Failed to allocate fd or security error, free fds
2347 * installed so far.
2348 */
2349 num_installed_fds = fdi;
2350 for (fdi = 0; fdi < num_installed_fds; fdi++)
2351 task_close_fd(target_proc, fd_array[fdi]);
2352 return target_fd;
2353}
2354
Martijn Coenen5a6da532016-09-30 14:10:07 +02002355static int binder_fixup_parent(struct binder_transaction *t,
2356 struct binder_thread *thread,
2357 struct binder_buffer_object *bp,
2358 binder_size_t *off_start,
2359 binder_size_t num_valid,
2360 struct binder_buffer_object *last_fixup_obj,
2361 binder_size_t last_fixup_min_off)
2362{
2363 struct binder_buffer_object *parent;
2364 u8 *parent_buffer;
2365 struct binder_buffer *b = t->buffer;
2366 struct binder_proc *proc = thread->proc;
2367 struct binder_proc *target_proc = t->to_proc;
2368
2369 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2370 return 0;
2371
2372 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2373 if (!parent) {
2374 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2375 proc->pid, thread->pid);
2376 return -EINVAL;
2377 }
2378
2379 if (!binder_validate_fixup(b, off_start,
2380 parent, bp->parent_offset,
2381 last_fixup_obj,
2382 last_fixup_min_off)) {
2383 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2384 proc->pid, thread->pid);
2385 return -EINVAL;
2386 }
2387
2388 if (parent->length < sizeof(binder_uintptr_t) ||
2389 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2390 /* No space for a pointer here! */
2391 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2392 proc->pid, thread->pid);
2393 return -EINVAL;
2394 }
2395 parent_buffer = (u8 *)(parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002396 binder_alloc_get_user_buffer_offset(
2397 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002398 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2399
2400 return 0;
2401}
2402
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002403static void binder_transaction(struct binder_proc *proc,
2404 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002405 struct binder_transaction_data *tr, int reply,
2406 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002407{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002408 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002409 struct binder_transaction *t;
2410 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002411 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002412 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002413 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002414 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002415 struct binder_thread *target_thread = NULL;
2416 struct binder_node *target_node = NULL;
2417 struct list_head *target_list;
2418 wait_queue_head_t *target_wait;
2419 struct binder_transaction *in_reply_to = NULL;
2420 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002421 uint32_t return_error = 0;
2422 uint32_t return_error_param = 0;
2423 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002424 struct binder_buffer_object *last_fixup_obj = NULL;
2425 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002426 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002427 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002428
2429 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002430 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002431 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2432 e->from_proc = proc->pid;
2433 e->from_thread = thread->pid;
2434 e->target_handle = tr->target.handle;
2435 e->data_size = tr->data_size;
2436 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002437 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002438
2439 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002440 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002441 in_reply_to = thread->transaction_stack;
2442 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002443 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302444 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002445 proc->pid, thread->pid);
2446 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002447 return_error_param = -EPROTO;
2448 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002449 goto err_empty_call_stack;
2450 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002451 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002452 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302453 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002454 proc->pid, thread->pid, in_reply_to->debug_id,
2455 in_reply_to->to_proc ?
2456 in_reply_to->to_proc->pid : 0,
2457 in_reply_to->to_thread ?
2458 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002459 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002460 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002461 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002462 return_error_param = -EPROTO;
2463 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002464 in_reply_to = NULL;
2465 goto err_bad_call_stack;
2466 }
2467 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002468 binder_inner_proc_unlock(proc);
2469 binder_set_nice(in_reply_to->saved_priority);
2470 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002471 if (target_thread == NULL) {
2472 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002473 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002474 goto err_dead_binder;
2475 }
2476 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302477 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002478 proc->pid, thread->pid,
2479 target_thread->transaction_stack ?
2480 target_thread->transaction_stack->debug_id : 0,
2481 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002482 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002483 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002484 return_error_param = -EPROTO;
2485 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002486 in_reply_to = NULL;
2487 target_thread = NULL;
2488 goto err_dead_binder;
2489 }
2490 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002491 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002492 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002493 } else {
2494 if (tr->target.handle) {
2495 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002496
Todd Kjosc37162d2017-05-26 11:56:29 -07002497 /*
2498 * There must already be a strong ref
2499 * on this node. If so, do a strong
2500 * increment on the node to ensure it
2501 * stays alive until the transaction is
2502 * done.
2503 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002504 binder_proc_lock(proc);
2505 ref = binder_get_ref_olocked(proc, tr->target.handle,
2506 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002507 if (ref) {
2508 binder_inc_node(ref->node, 1, 0, NULL);
2509 target_node = ref->node;
2510 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002511 binder_proc_unlock(proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002512 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302513 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002514 proc->pid, thread->pid);
2515 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002516 return_error_param = -EINVAL;
2517 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002518 goto err_invalid_target_handle;
2519 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002520 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002521 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002522 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002523 if (target_node == NULL) {
2524 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002525 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjose598d172017-03-22 17:19:52 -07002526 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002527 goto err_no_context_mgr_node;
2528 }
Todd Kjosc37162d2017-05-26 11:56:29 -07002529 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002530 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002531 }
2532 e->to_node = target_node->debug_id;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002533 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002534 target_proc = target_node->proc;
2535 if (target_proc == NULL) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002536 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002537 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002538 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002539 goto err_dead_binder;
2540 }
Todd Kjosb4827902017-05-25 15:52:17 -07002541 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002542 target_proc->tmp_ref++;
Todd Kjosb4827902017-05-25 15:52:17 -07002543 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002544 binder_node_unlock(target_node);
Stephen Smalley79af7302015-01-21 10:54:10 -05002545 if (security_binder_transaction(proc->tsk,
2546 target_proc->tsk) < 0) {
2547 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002548 return_error_param = -EPERM;
2549 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05002550 goto err_invalid_target_handle;
2551 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002552 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002553 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2554 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002555
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002556 tmp = thread->transaction_stack;
2557 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002558 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302559 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002560 proc->pid, thread->pid, tmp->debug_id,
2561 tmp->to_proc ? tmp->to_proc->pid : 0,
2562 tmp->to_thread ?
2563 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002564 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002565 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002566 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002567 return_error_param = -EPROTO;
2568 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002569 goto err_bad_call_stack;
2570 }
2571 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002572 struct binder_thread *from;
2573
2574 spin_lock(&tmp->lock);
2575 from = tmp->from;
2576 if (from && from->proc == target_proc) {
2577 atomic_inc(&from->tmp_ref);
2578 target_thread = from;
2579 spin_unlock(&tmp->lock);
2580 break;
2581 }
2582 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002583 tmp = tmp->from_parent;
2584 }
2585 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002586 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002587 }
2588 if (target_thread) {
2589 e->to_thread = target_thread->pid;
2590 target_list = &target_thread->todo;
2591 target_wait = &target_thread->wait;
2592 } else {
2593 target_list = &target_proc->todo;
2594 target_wait = &target_proc->wait;
2595 }
2596 e->to_proc = target_proc->pid;
2597
2598 /* TODO: reuse incoming transaction for reply */
2599 t = kzalloc(sizeof(*t), GFP_KERNEL);
2600 if (t == NULL) {
2601 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002602 return_error_param = -ENOMEM;
2603 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002604 goto err_alloc_t_failed;
2605 }
2606 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07002607 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002608
2609 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2610 if (tcomplete == NULL) {
2611 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002612 return_error_param = -ENOMEM;
2613 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002614 goto err_alloc_tcomplete_failed;
2615 }
2616 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2617
Todd Kjos1cfe6272017-05-24 13:33:28 -07002618 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002619
2620 if (reply)
2621 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002622 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002623 proc->pid, thread->pid, t->debug_id,
2624 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002625 (u64)tr->data.ptr.buffer,
2626 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002627 (u64)tr->data_size, (u64)tr->offsets_size,
2628 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002629 else
2630 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002631 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002632 proc->pid, thread->pid, t->debug_id,
2633 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002634 (u64)tr->data.ptr.buffer,
2635 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002636 (u64)tr->data_size, (u64)tr->offsets_size,
2637 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002638
2639 if (!reply && !(tr->flags & TF_ONE_WAY))
2640 t->from = thread;
2641 else
2642 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03002643 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002644 t->to_proc = target_proc;
2645 t->to_thread = target_thread;
2646 t->code = tr->code;
2647 t->flags = tr->flags;
2648 t->priority = task_nice(current);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002649
2650 trace_binder_transaction(reply, t, target_node);
2651
Todd Kjosd325d372016-10-10 10:40:53 -07002652 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02002653 tr->offsets_size, extra_buffers_size,
2654 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07002655 if (IS_ERR(t->buffer)) {
2656 /*
2657 * -ESRCH indicates VMA cleared. The target is dying.
2658 */
2659 return_error_param = PTR_ERR(t->buffer);
2660 return_error = return_error_param == -ESRCH ?
2661 BR_DEAD_REPLY : BR_FAILED_REPLY;
2662 return_error_line = __LINE__;
2663 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002664 goto err_binder_alloc_buf_failed;
2665 }
2666 t->buffer->allow_user_free = 0;
2667 t->buffer->debug_id = t->debug_id;
2668 t->buffer->transaction = t;
2669 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002670 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002671 off_start = (binder_size_t *)(t->buffer->data +
2672 ALIGN(tr->data_size, sizeof(void *)));
2673 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002674
Arve Hjønnevågda498892014-02-21 14:40:26 -08002675 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2676 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302677 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2678 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002679 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002680 return_error_param = -EFAULT;
2681 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002682 goto err_copy_data_failed;
2683 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002684 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2685 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302686 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2687 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002688 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002689 return_error_param = -EFAULT;
2690 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002691 goto err_copy_data_failed;
2692 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002693 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2694 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2695 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002696 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002697 return_error_param = -EINVAL;
2698 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002699 goto err_bad_offset;
2700 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02002701 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2702 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2703 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05302704 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002705 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002706 return_error_param = -EINVAL;
2707 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002708 goto err_bad_offset;
2709 }
2710 off_end = (void *)off_start + tr->offsets_size;
2711 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2712 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002713 off_min = 0;
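	/*
	 * Walk the offsets array, validating each flattened object
	 * (in bounds and in increasing order, enforced via off_min)
	 * and translating it so it is valid in the target process.
	 */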
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002714 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002715 struct binder_object_header *hdr;
2716 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002717
Martijn Coenen00c80372016-07-13 12:06:49 +02002718 if (object_size == 0 || *offp < off_min) {
2719 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002720 proc->pid, thread->pid, (u64)*offp,
2721 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02002722 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002723 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002724 return_error_param = -EINVAL;
2725 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002726 goto err_bad_offset;
2727 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002728
2729 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2730 off_min = *offp + object_size;
2731 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002732 case BINDER_TYPE_BINDER:
2733 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002734 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002735
Martijn Coenen00c80372016-07-13 12:06:49 +02002736 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002737 ret = binder_translate_binder(fp, t, thread);
2738 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02002739 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002740 return_error_param = ret;
2741 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002742 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002743 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002744 } break;
2745 case BINDER_TYPE_HANDLE:
2746 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002747 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002748
Martijn Coenen00c80372016-07-13 12:06:49 +02002749 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002750 ret = binder_translate_handle(fp, t, thread);
2751 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002752 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002753 return_error_param = ret;
2754 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002755 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002756 }
2757 } break;
2758
2759 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002760 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002761 int target_fd = binder_translate_fd(fp->fd, t, thread,
2762 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002763
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002764 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002765 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002766 return_error_param = target_fd;
2767 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002768 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002769 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002770 fp->pad_binder = 0;
2771 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002772 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002773 case BINDER_TYPE_FDA: {
2774 struct binder_fd_array_object *fda =
2775 to_binder_fd_array_object(hdr);
2776 struct binder_buffer_object *parent =
2777 binder_validate_ptr(t->buffer, fda->parent,
2778 off_start,
2779 offp - off_start);
2780 if (!parent) {
2781 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2782 proc->pid, thread->pid);
2783 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002784 return_error_param = -EINVAL;
2785 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002786 goto err_bad_parent;
2787 }
2788 if (!binder_validate_fixup(t->buffer, off_start,
2789 parent, fda->parent_offset,
2790 last_fixup_obj,
2791 last_fixup_min_off)) {
2792 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2793 proc->pid, thread->pid);
2794 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002795 return_error_param = -EINVAL;
2796 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002797 goto err_bad_parent;
2798 }
2799 ret = binder_translate_fd_array(fda, parent, t, thread,
2800 in_reply_to);
2801 if (ret < 0) {
2802 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002803 return_error_param = ret;
2804 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002805 goto err_translate_failed;
2806 }
2807 last_fixup_obj = parent;
2808 last_fixup_min_off =
2809 fda->parent_offset + sizeof(u32) * fda->num_fds;
2810 } break;
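		/*
		 * BINDER_TYPE_PTR objects carry scatter-gather payload:
		 * each buffer is copied into the extra space after the
		 * offsets and its pointer is rewritten for the target's
		 * address space before parent fixups are applied.
		 */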
Martijn Coenen5a6da532016-09-30 14:10:07 +02002811 case BINDER_TYPE_PTR: {
2812 struct binder_buffer_object *bp =
2813 to_binder_buffer_object(hdr);
2814 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002815
Martijn Coenen5a6da532016-09-30 14:10:07 +02002816 if (bp->length > buf_left) {
2817 binder_user_error("%d:%d got transaction with too large buffer\n",
2818 proc->pid, thread->pid);
2819 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002820 return_error_param = -EINVAL;
2821 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002822 goto err_bad_offset;
2823 }
2824 if (copy_from_user(sg_bufp,
2825 (const void __user *)(uintptr_t)
2826 bp->buffer, bp->length)) {
2827				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
2828 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07002829 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002830 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002831 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002832 goto err_copy_data_failed;
2833 }
2834 /* Fixup buffer pointer to target proc address space */
2835 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07002836 binder_alloc_get_user_buffer_offset(
2837 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002838 sg_bufp += ALIGN(bp->length, sizeof(u64));
2839
2840 ret = binder_fixup_parent(t, thread, bp, off_start,
2841 offp - off_start,
2842 last_fixup_obj,
2843 last_fixup_min_off);
2844 if (ret < 0) {
2845 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002846 return_error_param = ret;
2847 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002848 goto err_translate_failed;
2849 }
2850 last_fixup_obj = bp;
2851 last_fixup_min_off = 0;
2852 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002853 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002854 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002855 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002856 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002857 return_error_param = -EINVAL;
2858 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002859 goto err_bad_object_type;
2860 }
2861 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002862 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002863 binder_enqueue_work(proc, tcomplete, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002864 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002865
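	/*
	 * Deliver the transaction: a reply is popped from the target
	 * thread's stack, a synchronous call is pushed onto this
	 * thread's stack, and a one-way call is queued on the target
	 * proc, or parked on the node's async_todo while another
	 * async transaction on that node is in flight.
	 */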
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002866 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002867 binder_inner_proc_lock(target_proc);
2868 if (target_thread->is_dead) {
2869 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002870 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002871 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002872 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002873 binder_pop_transaction_ilocked(target_thread, in_reply_to);
2874 binder_enqueue_work_ilocked(&t->work, target_list);
2875 binder_inner_proc_unlock(target_proc);
Todd Kjos21ef40a2017-03-30 18:02:13 -07002876 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002877 } else if (!(t->flags & TF_ONE_WAY)) {
2878 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002879 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002880 t->need_reply = 1;
2881 t->from_parent = thread->transaction_stack;
2882 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002883 binder_inner_proc_unlock(proc);
2884 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002885 if (target_proc->is_dead ||
2886 (target_thread && target_thread->is_dead)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002887 binder_inner_proc_unlock(target_proc);
2888 binder_inner_proc_lock(proc);
2889 binder_pop_transaction_ilocked(thread, t);
2890 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002891 goto err_dead_proc_or_thread;
2892 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002893 binder_enqueue_work_ilocked(&t->work, target_list);
2894 binder_inner_proc_unlock(target_proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002895 } else {
2896 BUG_ON(target_node == NULL);
2897 BUG_ON(t->buffer->async_transaction != 1);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002898 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002899 if (target_node->has_async_transaction) {
2900 target_list = &target_node->async_todo;
2901 target_wait = NULL;
2902 } else
2903 target_node->has_async_transaction = 1;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002904		/*
2905		 * The test/set of has_async_transaction must be
2906		 * atomic with the enqueue on async_todo, so the
2907		 * node lock is held across both.
2908		 */
Martijn Coenen995a36e2017-06-02 13:36:52 -07002909 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002910 if (target_proc->is_dead ||
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002911 (target_thread && target_thread->is_dead)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002912 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002913 binder_node_unlock(target_node);
Todd Kjos2f993e22017-05-12 14:42:55 -07002914 goto err_dead_proc_or_thread;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002915 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002916 binder_enqueue_work_ilocked(&t->work, target_list);
2917 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002918 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002919 }
Riley Andrewsb5968812015-09-01 12:42:07 -07002920 if (target_wait) {
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002921 if (reply || !(tr->flags & TF_ONE_WAY))
Riley Andrewsb5968812015-09-01 12:42:07 -07002922 wake_up_interruptible_sync(target_wait);
2923 else
2924 wake_up_interruptible(target_wait);
2925 }
Todd Kjos2f993e22017-05-12 14:42:55 -07002926 if (target_thread)
2927 binder_thread_dec_tmpref(target_thread);
2928 binder_proc_dec_tmpref(target_proc);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002929	/*
2930	 * write barrier: commit the log entry fields before
2931	 * debug_id_done publishes the entry as complete
2932	 */
2933 smp_wmb();
2934 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002935 return;
2936
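	/*
	 * Error unwind: the labels below are ordered so that each
	 * failure point releases only the resources acquired before it.
	 */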
Todd Kjos2f993e22017-05-12 14:42:55 -07002937err_dead_proc_or_thread:
2938 return_error = BR_DEAD_REPLY;
2939 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002940err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002941err_bad_object_type:
2942err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002943err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002944err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002945 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002946 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjosc37162d2017-05-26 11:56:29 -07002947 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002948 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07002949 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002950err_binder_alloc_buf_failed:
2951 kfree(tcomplete);
2952 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2953err_alloc_tcomplete_failed:
2954 kfree(t);
2955 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2956err_alloc_t_failed:
2957err_bad_call_stack:
2958err_empty_call_stack:
2959err_dead_binder:
2960err_invalid_target_handle:
2961err_no_context_mgr_node:
Todd Kjos2f993e22017-05-12 14:42:55 -07002962 if (target_thread)
2963 binder_thread_dec_tmpref(target_thread);
2964 if (target_proc)
2965 binder_proc_dec_tmpref(target_proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002966 if (target_node)
2967 binder_dec_node(target_node, 1, 0);
2968
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002969 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07002970 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2971 proc->pid, thread->pid, return_error, return_error_param,
2972 (u64)tr->data_size, (u64)tr->offsets_size,
2973 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002974
2975 {
2976 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09002977
Todd Kjose598d172017-03-22 17:19:52 -07002978 e->return_error = return_error;
2979 e->return_error_param = return_error_param;
2980 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002981 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2982 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002983	/*
2984	 * write barrier: commit the log entry fields before
2985	 * debug_id_done publishes the entry as complete
2986	 */
2987 smp_wmb();
2988 WRITE_ONCE(e->debug_id_done, t_debug_id);
2989 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002990 }
2991
Todd Kjos858b8da2017-04-21 17:35:12 -07002992 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002993 if (in_reply_to) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002994 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002995 binder_enqueue_work(thread->proc,
2996 &thread->return_error.work,
2997 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002998 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07002999 } else {
3000 thread->return_error.cmd = return_error;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003001 binder_enqueue_work(thread->proc,
3002 &thread->return_error.work,
3003 &thread->todo);
Todd Kjos858b8da2017-04-21 17:35:12 -07003004 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003005}
3006
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003007static int binder_thread_write(struct binder_proc *proc,
3008 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003009 binder_uintptr_t binder_buffer, size_t size,
3010 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003011{
3012 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003013 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003014 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003015 void __user *ptr = buffer + *consumed;
3016 void __user *end = buffer + size;
3017
Todd Kjos858b8da2017-04-21 17:35:12 -07003018 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003019 int ret;
3020
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003021 if (get_user(cmd, (uint32_t __user *)ptr))
3022 return -EFAULT;
3023 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003024 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003025 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003026 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3027 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3028 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003029 }
3030 switch (cmd) {
3031 case BC_INCREFS:
3032 case BC_ACQUIRE:
3033 case BC_RELEASE:
3034 case BC_DECREFS: {
3035 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003036 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003037 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3038 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3039 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003040
3041 if (get_user(target, (uint32_t __user *)ptr))
3042 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003043
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003044 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003045 ret = -1;
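			/*
			 * Handle 0 names the context manager; taking a
			 * new reference on it goes directly through the
			 * context manager node.
			 */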
3046 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003047 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003048 mutex_lock(&context->context_mgr_node_lock);
3049 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003050 if (ctx_mgr_node)
3051 ret = binder_inc_ref_for_node(
3052 proc, ctx_mgr_node,
3053 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003054 mutex_unlock(&context->context_mgr_node_lock);
3055 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003056 if (ret)
3057 ret = binder_update_ref_for_handle(
3058 proc, target, increment, strong,
3059 &rdata);
3060 if (!ret && rdata.desc != target) {
3061 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3062 proc->pid, thread->pid,
3063 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003064 }
3065 switch (cmd) {
3066 case BC_INCREFS:
3067 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003068 break;
3069 case BC_ACQUIRE:
3070 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003071 break;
3072 case BC_RELEASE:
3073 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003074 break;
3075 case BC_DECREFS:
3076 default:
3077 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003078 break;
3079 }
3080 if (ret) {
3081 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3082 proc->pid, thread->pid, debug_string,
3083 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003084 break;
3085 }
3086 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003087 "%d:%d %s ref %d desc %d s %d w %d\n",
3088 proc->pid, thread->pid, debug_string,
3089 rdata.debug_id, rdata.desc, rdata.strong,
3090 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003091 break;
3092 }
3093 case BC_INCREFS_DONE:
3094 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003095 binder_uintptr_t node_ptr;
3096 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003097 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003098 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003099
Arve Hjønnevågda498892014-02-21 14:40:26 -08003100 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003101 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003102 ptr += sizeof(binder_uintptr_t);
3103 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003104 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003105 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003106 node = binder_get_node(proc, node_ptr);
3107 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003108 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003109 proc->pid, thread->pid,
3110 cmd == BC_INCREFS_DONE ?
3111 "BC_INCREFS_DONE" :
3112 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003113 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003114 break;
3115 }
3116 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003117 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003118 proc->pid, thread->pid,
3119 cmd == BC_INCREFS_DONE ?
3120 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003121 (u64)node_ptr, node->debug_id,
3122 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003123 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003124 break;
3125 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003126 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003127 if (cmd == BC_ACQUIRE_DONE) {
3128 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303129 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003130 proc->pid, thread->pid,
3131 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003132 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003133 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003134 break;
3135 }
3136 node->pending_strong_ref = 0;
3137 } else {
3138 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303139 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003140 proc->pid, thread->pid,
3141 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003142 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003143 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003144 break;
3145 }
3146 node->pending_weak_ref = 0;
3147 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003148 free_node = binder_dec_node_nilocked(node,
3149 cmd == BC_ACQUIRE_DONE, 0);
3150 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003151 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003152 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003153 proc->pid, thread->pid,
3154 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003155 node->debug_id, node->local_strong_refs,
3156 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003157 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003158 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003159 break;
3160 }
3161 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303162 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003163 return -EINVAL;
3164 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303165 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003166 return -EINVAL;
3167
3168 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003169 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003170 struct binder_buffer *buffer;
3171
Arve Hjønnevågda498892014-02-21 14:40:26 -08003172 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003173 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003174 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003175
Todd Kjos076072a2017-04-21 14:32:11 -07003176 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3177 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003178 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003179 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3180 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003181 break;
3182 }
3183 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003184 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3185 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003186 break;
3187 }
3188 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003189 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3190 proc->pid, thread->pid, (u64)data_ptr,
3191 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003192 buffer->transaction ? "active" : "finished");
3193
3194 if (buffer->transaction) {
3195 buffer->transaction->buffer = NULL;
3196 buffer->transaction = NULL;
3197 }
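			/*
			 * Freeing an async buffer completes the node's
			 * in-flight async transaction; move the next
			 * queued async work (if any) to this thread so
			 * only one is in flight per node.
			 */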
3198 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003199 struct binder_node *buf_node;
3200 struct binder_work *w;
3201
3202 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003203 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003204 BUG_ON(!buf_node->has_async_transaction);
3205 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003206 w = binder_dequeue_work_head_ilocked(
3207 &buf_node->async_todo);
3208 if (!w)
3209 buf_node->has_async_transaction = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003210 else
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003211 binder_enqueue_work_ilocked(
3212 w, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003213 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003214 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003215 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003216 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003217 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003218 break;
3219 }
3220
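		/*
		 * The _SG variants extend binder_transaction_data with
		 * buffers_size bytes of scatter-gather payload, consumed
		 * by BINDER_TYPE_PTR objects during translation.
		 */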
Martijn Coenen5a6da532016-09-30 14:10:07 +02003221 case BC_TRANSACTION_SG:
3222 case BC_REPLY_SG: {
3223 struct binder_transaction_data_sg tr;
3224
3225 if (copy_from_user(&tr, ptr, sizeof(tr)))
3226 return -EFAULT;
3227 ptr += sizeof(tr);
3228 binder_transaction(proc, thread, &tr.transaction_data,
3229 cmd == BC_REPLY_SG, tr.buffers_size);
3230 break;
3231 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003232 case BC_TRANSACTION:
3233 case BC_REPLY: {
3234 struct binder_transaction_data tr;
3235
3236 if (copy_from_user(&tr, ptr, sizeof(tr)))
3237 return -EFAULT;
3238 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003239 binder_transaction(proc, thread, &tr,
3240 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003241 break;
3242 }
3243
3244 case BC_REGISTER_LOOPER:
3245 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303246 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003247 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003248 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003249 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3250 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303251 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003252 proc->pid, thread->pid);
3253 } else if (proc->requested_threads == 0) {
3254 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303255 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003256 proc->pid, thread->pid);
3257 } else {
3258 proc->requested_threads--;
3259 proc->requested_threads_started++;
3260 }
3261 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003262 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003263 break;
3264 case BC_ENTER_LOOPER:
3265 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303266 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003267 proc->pid, thread->pid);
3268 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3269 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303270 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003271 proc->pid, thread->pid);
3272 }
3273 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3274 break;
3275 case BC_EXIT_LOOPER:
3276 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303277 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003278 proc->pid, thread->pid);
3279 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3280 break;
3281
3282 case BC_REQUEST_DEATH_NOTIFICATION:
3283 case BC_CLEAR_DEATH_NOTIFICATION: {
3284 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003285 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003286 struct binder_ref *ref;
Todd Kjos5346bf32016-10-20 16:43:34 -07003287 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003288
3289 if (get_user(target, (uint32_t __user *)ptr))
3290 return -EFAULT;
3291 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003292 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003293 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003294 ptr += sizeof(binder_uintptr_t);
Todd Kjos5346bf32016-10-20 16:43:34 -07003295 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3296 /*
3297 * Allocate memory for death notification
3298 * before taking lock
3299 */
3300 death = kzalloc(sizeof(*death), GFP_KERNEL);
3301 if (death == NULL) {
3302 WARN_ON(thread->return_error.cmd !=
3303 BR_OK);
3304 thread->return_error.cmd = BR_ERROR;
3305 binder_enqueue_work(
3306 thread->proc,
3307 &thread->return_error.work,
3308 &thread->todo);
3309 binder_debug(
3310 BINDER_DEBUG_FAILED_TRANSACTION,
3311 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3312 proc->pid, thread->pid);
3313 break;
3314 }
3315 }
3316 binder_proc_lock(proc);
3317 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003318 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303319 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003320 proc->pid, thread->pid,
3321 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3322 "BC_REQUEST_DEATH_NOTIFICATION" :
3323 "BC_CLEAR_DEATH_NOTIFICATION",
3324 target);
Todd Kjos5346bf32016-10-20 16:43:34 -07003325 binder_proc_unlock(proc);
3326 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003327 break;
3328 }
3329
3330 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003331 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003332 proc->pid, thread->pid,
3333 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3334 "BC_REQUEST_DEATH_NOTIFICATION" :
3335 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003336 (u64)cookie, ref->data.debug_id,
3337 ref->data.desc, ref->data.strong,
3338 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003339
3340 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3341 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303342 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003343 proc->pid, thread->pid);
Todd Kjos5346bf32016-10-20 16:43:34 -07003344 binder_proc_unlock(proc);
3345 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003346 break;
3347 }
3348 binder_stats_created(BINDER_STAT_DEATH);
3349 INIT_LIST_HEAD(&death->work.entry);
3350 death->cookie = cookie;
3351 ref->death = death;
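				/*
				 * If the node's process is already dead,
				 * deliver the death notification right
				 * away: to this thread if it is a looper,
				 * otherwise to the proc's todo list.
				 */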
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003352 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003353 if (ref->node->proc == NULL) {
3354 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003355 if (thread->looper &
3356 (BINDER_LOOPER_STATE_REGISTERED |
3357 BINDER_LOOPER_STATE_ENTERED))
3358 binder_enqueue_work(
3359 proc,
3360 &ref->death->work,
3361 &thread->todo);
3362 else {
3363 binder_enqueue_work(
3364 proc,
3365 &ref->death->work,
3366 &proc->todo);
3367 wake_up_interruptible(
3368 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003369 }
3370 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003371 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003372 } else {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003373 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003374 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303375 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003376 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003377 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003378 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003379 break;
3380 }
3381 death = ref->death;
3382 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003383 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003384 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003385 (u64)death->cookie,
3386 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003387 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003388 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003389 break;
3390 }
3391 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003392 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003393 if (list_empty(&death->work.entry)) {
3394 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003395 if (thread->looper &
3396 (BINDER_LOOPER_STATE_REGISTERED |
3397 BINDER_LOOPER_STATE_ENTERED))
3398 binder_enqueue_work_ilocked(
3399 &death->work,
3400 &thread->todo);
3401 else {
3402 binder_enqueue_work_ilocked(
3403 &death->work,
3404 &proc->todo);
3405 wake_up_interruptible(
3406 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003407 }
3408 } else {
3409 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3410 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3411 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003412 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003413 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003414 }
Todd Kjos5346bf32016-10-20 16:43:34 -07003415 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003416 } break;
3417 case BC_DEAD_BINDER_DONE: {
3418 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003419 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003420 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003421
Arve Hjønnevågda498892014-02-21 14:40:26 -08003422 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003423 return -EFAULT;
3424
Lisa Du7a64cd82016-02-17 09:32:52 +08003425 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003426 binder_inner_proc_lock(proc);
3427 list_for_each_entry(w, &proc->delivered_death,
3428 entry) {
3429 struct binder_ref_death *tmp_death =
3430 container_of(w,
3431 struct binder_ref_death,
3432 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003433
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003434 if (tmp_death->cookie == cookie) {
3435 death = tmp_death;
3436 break;
3437 }
3438 }
3439 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003440 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3441 proc->pid, thread->pid, (u64)cookie,
3442 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003443 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003444 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3445 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003446 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003447 break;
3448 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003449 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003450 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3451 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003452 if (thread->looper &
3453 (BINDER_LOOPER_STATE_REGISTERED |
3454 BINDER_LOOPER_STATE_ENTERED))
3455 binder_enqueue_work_ilocked(
3456 &death->work, &thread->todo);
3457 else {
3458 binder_enqueue_work_ilocked(
3459 &death->work,
3460 &proc->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003461 wake_up_interruptible(&proc->wait);
3462 }
3463 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003464 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003465 } break;
3466
3467 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303468 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003469 proc->pid, thread->pid, cmd);
3470 return -EINVAL;
3471 }
3472 *consumed = ptr - buffer;
3473 }
3474 return 0;
3475}
3476
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003477static void binder_stat_br(struct binder_proc *proc,
3478 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003479{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003480 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003481 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003482 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3483 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3484 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003485 }
3486}
3487
3488static int binder_has_proc_work(struct binder_proc *proc,
3489 struct binder_thread *thread)
3490{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003491 return !binder_worklist_empty(proc, &proc->todo) ||
3492 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003493}
3494
3495static int binder_has_thread_work(struct binder_thread *thread)
3496{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003497 return !binder_worklist_empty(thread->proc, &thread->todo) ||
3498 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003499}
3500
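/*
 * Write one node refcount command (cmd, node ptr, node cookie) into
 * the userspace read buffer and advance *ptrp past what was written.
 */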
Todd Kjos60792612017-05-24 10:51:01 -07003501static int binder_put_node_cmd(struct binder_proc *proc,
3502 struct binder_thread *thread,
3503 void __user **ptrp,
3504 binder_uintptr_t node_ptr,
3505 binder_uintptr_t node_cookie,
3506 int node_debug_id,
3507 uint32_t cmd, const char *cmd_name)
3508{
3509 void __user *ptr = *ptrp;
3510
3511 if (put_user(cmd, (uint32_t __user *)ptr))
3512 return -EFAULT;
3513 ptr += sizeof(uint32_t);
3514
3515 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3516 return -EFAULT;
3517 ptr += sizeof(binder_uintptr_t);
3518
3519 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3520 return -EFAULT;
3521 ptr += sizeof(binder_uintptr_t);
3522
3523 binder_stat_br(proc, thread, cmd);
3524 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3525 proc->pid, thread->pid, cmd_name, node_debug_id,
3526 (u64)node_ptr, (u64)node_cookie);
3527
3528 *ptrp = ptr;
3529 return 0;
3530}
3531
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003532static int binder_thread_read(struct binder_proc *proc,
3533 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003534 binder_uintptr_t binder_buffer, size_t size,
3535 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003536{
Arve Hjønnevågda498892014-02-21 14:40:26 -08003537 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003538 void __user *ptr = buffer + *consumed;
3539 void __user *end = buffer + size;
3540
3541 int ret = 0;
3542 int wait_for_proc_work;
3543
3544 if (*consumed == 0) {
3545 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3546 return -EFAULT;
3547 ptr += sizeof(uint32_t);
3548 }
3549
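/*
 * A thread waits for proc-wide work only when it has no transaction
 * stack and its own todo list is empty; ready_threads counts the
 * threads parked waiting for proc work.
 */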
3550retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07003551 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003552 wait_for_proc_work = thread->transaction_stack == NULL &&
Martijn Coenen995a36e2017-06-02 13:36:52 -07003553 binder_worklist_empty_ilocked(&thread->todo);
Todd Kjosd600e902017-05-25 17:35:02 -07003554 if (wait_for_proc_work)
3555 proc->ready_threads++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003556 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003557
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003558 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003559
3560 binder_unlock(__func__);
3561
3562 trace_binder_wait_for_work(wait_for_proc_work,
3563 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003564 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003565 if (wait_for_proc_work) {
3566 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3567 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303568 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003569 proc->pid, thread->pid, thread->looper);
3570 wait_event_interruptible(binder_user_error_wait,
3571 binder_stop_on_user_error < 2);
3572 }
3573 binder_set_nice(proc->default_priority);
3574 if (non_block) {
3575 if (!binder_has_proc_work(proc, thread))
3576 ret = -EAGAIN;
3577 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003578 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003579 } else {
3580 if (non_block) {
3581 if (!binder_has_thread_work(thread))
3582 ret = -EAGAIN;
3583 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003584 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003585 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003586
3587 binder_lock(__func__);
3588
Todd Kjosd600e902017-05-25 17:35:02 -07003589 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003590 if (wait_for_proc_work)
3591 proc->ready_threads--;
Todd Kjosd600e902017-05-25 17:35:02 -07003592 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003593 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3594
3595 if (ret)
3596 return ret;
3597
3598 while (1) {
3599 uint32_t cmd;
3600 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003601 struct binder_work *w = NULL;
3602 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003603 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07003604 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003605
Todd Kjose7f23ed2017-03-21 13:06:01 -07003606 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003607 if (!binder_worklist_empty_ilocked(&thread->todo))
3608 list = &thread->todo;
3609 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3610 wait_for_proc_work)
3611 list = &proc->todo;
3612 else {
3613 binder_inner_proc_unlock(proc);
3614
Dmitry Voytik395262a2014-09-08 18:16:34 +04003615			/* no data added beyond the initial 4-byte BR_NOOP */
Todd Kjos6798e6d2017-01-06 14:19:25 -08003616 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003617 goto retry;
3618 break;
3619 }
3620
Todd Kjose7f23ed2017-03-21 13:06:01 -07003621 if (end - ptr < sizeof(tr) + 4) {
3622 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003623 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003624 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003625 w = binder_dequeue_work_head_ilocked(list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003626
3627 switch (w->type) {
3628 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003629 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003630 t = container_of(w, struct binder_transaction, work);
3631 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003632 case BINDER_WORK_RETURN_ERROR: {
3633 struct binder_error *e = container_of(
3634 w, struct binder_error, work);
3635
3636 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003637 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07003638 if (put_user(e->cmd, (uint32_t __user *)ptr))
3639 return -EFAULT;
3640 e->cmd = BR_OK;
3641 ptr += sizeof(uint32_t);
3642
3643 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07003644 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003645 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003646 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003647 cmd = BR_TRANSACTION_COMPLETE;
3648 if (put_user(cmd, (uint32_t __user *)ptr))
3649 return -EFAULT;
3650 ptr += sizeof(uint32_t);
3651
3652 binder_stat_br(proc, thread, cmd);
3653 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303654 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003655 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003656 kfree(w);
3657 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3658 } break;
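		/*
		 * Node work reconciles the node's current strong/weak
		 * state with what userspace last saw, emitting
		 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS as needed
		 * and freeing the node once no references remain.
		 */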
3659 case BINDER_WORK_NODE: {
3660 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07003661 int strong, weak;
3662 binder_uintptr_t node_ptr = node->ptr;
3663 binder_uintptr_t node_cookie = node->cookie;
3664 int node_debug_id = node->debug_id;
3665 int has_weak_ref;
3666 int has_strong_ref;
3667 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09003668
Todd Kjos60792612017-05-24 10:51:01 -07003669 BUG_ON(proc != node->proc);
3670 strong = node->internal_strong_refs ||
3671 node->local_strong_refs;
3672 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07003673 node->local_weak_refs ||
3674 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07003675 has_strong_ref = node->has_strong_ref;
3676 has_weak_ref = node->has_weak_ref;
3677
3678 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003679 node->has_weak_ref = 1;
3680 node->pending_weak_ref = 1;
3681 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003682 }
3683 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003684 node->has_strong_ref = 1;
3685 node->pending_strong_ref = 1;
3686 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003687 }
3688 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003689 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003690 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003691 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003692 if (!weak && !strong) {
3693 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3694 "%d:%d node %d u%016llx c%016llx deleted\n",
3695 proc->pid, thread->pid,
3696 node_debug_id,
3697 (u64)node_ptr,
3698 (u64)node_cookie);
3699 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003700 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003701 binder_node_lock(node);
3702				/*
3703				 * Take the node lock before freeing the
3704				 * node: another thread that performed the
3705				 * final decrement may still be inside
3706				 * binder_node_unlock() on this node, and
3707				 * freeing it before that thread has
3708				 * released the lock would be a
3709				 * use-after-free.
3710				 */
3711 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003712 binder_free_node(node);
3713 } else
3714 binder_inner_proc_unlock(proc);
3715
Todd Kjos60792612017-05-24 10:51:01 -07003716 if (weak && !has_weak_ref)
3717 ret = binder_put_node_cmd(
3718 proc, thread, &ptr, node_ptr,
3719 node_cookie, node_debug_id,
3720 BR_INCREFS, "BR_INCREFS");
3721 if (!ret && strong && !has_strong_ref)
3722 ret = binder_put_node_cmd(
3723 proc, thread, &ptr, node_ptr,
3724 node_cookie, node_debug_id,
3725 BR_ACQUIRE, "BR_ACQUIRE");
3726 if (!ret && !strong && has_strong_ref)
3727 ret = binder_put_node_cmd(
3728 proc, thread, &ptr, node_ptr,
3729 node_cookie, node_debug_id,
3730 BR_RELEASE, "BR_RELEASE");
3731 if (!ret && !weak && has_weak_ref)
3732 ret = binder_put_node_cmd(
3733 proc, thread, &ptr, node_ptr,
3734 node_cookie, node_debug_id,
3735 BR_DECREFS, "BR_DECREFS");
3736 if (orig_ptr == ptr)
3737 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3738 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3739 proc->pid, thread->pid,
3740 node_debug_id,
3741 (u64)node_ptr,
3742 (u64)node_cookie);
3743 if (ret)
3744 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003745 } break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			/*
			 * TODO: there is a race condition between
			 * death notification requests and delivery
			 * of the notifications. This will be handled
			 * in a later patch.
			 */
			binder_inner_proc_unlock(proc);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_inner_proc_lock(proc);
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	/*
	 * The looper-state check below matters: the user-space code
	 * fails to spawn a new thread if we leave it out.
	 */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

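/**
 * binder_release_work() - drain and free pending work items
 * @proc:	binder_proc owning the worklist
 * @list:	list of work items to be released
 *
 * Called when a thread or process is torn down. Pending transactions
 * that still expect a reply are answered with BR_DEAD_REPLY; all other
 * work items are logged as undelivered and freed.
 */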
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

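/*
 * Find the binder_thread for current in proc->threads or, if
 * @new_thread is non-NULL, insert and initialize it. Caller must
 * hold proc->inner_lock.
 */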
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;

	return thread;
}

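/*
 * Return the binder_thread for the calling task, allocating and
 * registering a new one on first use. The allocation is done outside
 * the inner lock; if another task raced us and inserted an entry
 * first, the unused allocation is freed.
 */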
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

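/**
 * binder_thread_release() - detach a thread from its proc and clean up
 * @proc:	the thread's binder_proc
 * @thread:	the thread being released
 *
 * Removes the thread from proc->threads and walks its transaction
 * stack, severing any transactions that still reference it. If the
 * thread owed a reply on the transaction at the top of its stack, the
 * sender gets a BR_DEAD_REPLY. Any work left on thread->todo is
 * released.
 *
 * Return: the number of transactions that were still active.
 */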
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	binder_inner_proc_unlock(thread->proc);

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

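/*
 * poll() support: a thread with no private work and no transaction
 * stack waits on the proc-wide queue; otherwise it waits on its own
 * thread queue.
 */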
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	thread = binder_get_thread(proc);

	binder_inner_proc_lock(thread->proc);
	wait_for_proc_work = thread->transaction_stack == NULL &&
		binder_worklist_empty_ilocked(&thread->todo);
	binder_inner_proc_unlock(thread->proc);

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

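/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:	file for the binder proc
 * @cmd:	ioctl command (must carry a struct binder_write_read)
 * @arg:	user pointer to the struct binder_write_read
 * @thread:	calling binder thread
 *
 * Copies in the binder_write_read block, consumes the write buffer
 * first, then fills the read buffer, and copies the updated block
 * back so user space can see how much of each buffer was consumed.
 */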
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!binder_worklist_empty(proc, &proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

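/**
 * binder_ioctl_set_ctx_mgr() - make the caller the context manager
 * @filp:	file for the binder proc
 *
 * Registers a node owned by this process as the context manager for
 * the device, after an LSM check and (if a manager uid was already
 * recorded) a check that the caller's euid matches it. Fails with
 * -EBUSY if a context manager node already exists.
 */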
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

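/*
 * Top-level ioctl dispatcher. Waits until the driver is not stopped
 * on a user error, binds the calling task to a binder_thread, then
 * handles BINDER_WRITE_READ, BINDER_SET_MAX_THREADS,
 * BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT and BINDER_VERSION.
 */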
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

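/*
 * mmap() of a binder fd sets up the buffer space used to deliver
 * transaction data. The mapping is capped at 4MB, vm_flags matching
 * FORBIDDEN_MMAP_FLAGS are rejected, VM_DONTCOPY is forced and
 * VM_MAYWRITE cleared, and the range is handed to the binder
 * allocator via binder_alloc_mmap_handler().
 */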
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

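/*
 * open() of a binder device allocates the per-process binder_proc,
 * ties it to the thread-group leader, registers it on binder_procs
 * and creates its debugfs entry.
 */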
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

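/**
 * binder_node_release() - release a node whose proc is going away
 * @node:	node to release (caller must hold a tmp ref on it)
 * @refs:	running count of refs, for the caller's statistics
 *
 * If nothing else references the node, it is freed outright;
 * otherwise it is moved to binder_dead_nodes and a
 * BINDER_WORK_DEAD_BINDER item is queued for every ref that
 * registered a death notification.
 *
 * Return: the updated ref count.
 */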
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		binder_inner_proc_lock(ref->proc);
		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			binder_enqueue_work_ilocked(&ref->death->work,
						    &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

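/**
 * binder_deferred_release() - tear down a binder_proc after release()
 * @proc:	the process to tear down
 *
 * Unregisters the proc (dropping the context manager node if it owned
 * it), releases every thread, node and ref it still holds, and drains
 * the remaining worklists. A tmp ref keeps the proc alive throughout;
 * dropping it at the end frees the proc once all other tmp refs are
 * gone.
 */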
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	spin_lock(&t->lock);
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	WARN_ON(!spin_is_locked(&node->lock));
	if (node->proc)
		WARN_ON(!spin_is_locked(&node->proc->inner_lock));

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, "    ",
					  "    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, "  ", "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004922static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004923 "BR_ERROR",
4924 "BR_OK",
4925 "BR_TRANSACTION",
4926 "BR_REPLY",
4927 "BR_ACQUIRE_RESULT",
4928 "BR_DEAD_REPLY",
4929 "BR_TRANSACTION_COMPLETE",
4930 "BR_INCREFS",
4931 "BR_ACQUIRE",
4932 "BR_RELEASE",
4933 "BR_DECREFS",
4934 "BR_ATTEMPT_ACQUIRE",
4935 "BR_NOOP",
4936 "BR_SPAWN_LOOPER",
4937 "BR_FINISHED",
4938 "BR_DEAD_BINDER",
4939 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4940 "BR_FAILED_REPLY"
4941};
4942
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004943static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004944 "BC_TRANSACTION",
4945 "BC_REPLY",
4946 "BC_ACQUIRE_RESULT",
4947 "BC_FREE_BUFFER",
4948 "BC_INCREFS",
4949 "BC_ACQUIRE",
4950 "BC_RELEASE",
4951 "BC_DECREFS",
4952 "BC_INCREFS_DONE",
4953 "BC_ACQUIRE_DONE",
4954 "BC_ATTEMPT_ACQUIRE",
4955 "BC_REGISTER_LOOPER",
4956 "BC_ENTER_LOOPER",
4957 "BC_EXIT_LOOPER",
4958 "BC_REQUEST_DEATH_NOTIFICATION",
4959 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02004960 "BC_DEAD_BINDER_DONE",
4961 "BC_TRANSACTION_SG",
4962 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004963};
4964
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004965static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004966 "proc",
4967 "thread",
4968 "node",
4969 "ref",
4970 "death",
4971 "transaction",
4972 "transaction_complete"
4973};
4974
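/*
 * print_binder_stats() relies on the BC_/BR_/object string tables above
 * staying in sync with the counter arrays in struct binder_stats; the
 * BUILD_BUG_ON() checks below turn any mismatch into a compile error.
 */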
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004975static void print_binder_stats(struct seq_file *m, const char *prefix,
4976 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004977{
4978 int i;
4979
4980 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004981 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004982 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004983 int temp = atomic_read(&stats->bc[i]);
4984
4985 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004986 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004987 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004988 }
4989
4990 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004991 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004992 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004993 int temp = atomic_read(&stats->br[i]);
4994
4995 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004996 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004997 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004998 }
4999
5000 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005001 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005002 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005003 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005004 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005005 int created = atomic_read(&stats->obj_created[i]);
5006 int deleted = atomic_read(&stats->obj_deleted[i]);
5007
5008 if (created || deleted)
5009 seq_printf(m, "%s%s: active %d total %d\n",
5010 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005011 binder_objstat_strings[i],
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005012 created - deleted,
5013 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005014 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005015}
5016
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005017static void print_binder_proc_stats(struct seq_file *m,
5018 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005019{
5020 struct binder_work *w;
5021 struct rb_node *n;
5022 int count, strong, weak;
Todd Kjosb4827902017-05-25 15:52:17 -07005023 size_t free_async_space =
5024 binder_alloc_get_free_async_space(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005025
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005026 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005027 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005028 count = 0;
Todd Kjosb4827902017-05-25 15:52:17 -07005029 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005030 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5031 count++;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005032 seq_printf(m, " threads: %d\n", count);
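	/*
	 * Thread-pool summary: spawn requests still outstanding, plus
	 * looper threads already started, out of the max_threads ceiling.
	 */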
5033 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005034		   "  ready threads: %d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005035		   "  free async space: %zd\n", proc->requested_threads,
5036 proc->requested_threads_started, proc->max_threads,
Todd Kjosd325d372016-10-10 10:40:53 -07005037 proc->ready_threads,
Todd Kjosb4827902017-05-25 15:52:17 -07005038 free_async_space);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005039 count = 0;
5040 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5041 count++;
Todd Kjos425d23f2017-06-12 12:07:26 -07005042 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005043 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005044 count = 0;
5045 strong = 0;
5046 weak = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07005047 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005048 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5049 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5050 rb_node_desc);
5051 count++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07005052 strong += ref->data.strong;
5053 weak += ref->data.weak;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005054 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005055 binder_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005056 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005057
Todd Kjosd325d372016-10-10 10:40:53 -07005058 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005059 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005060
5061 count = 0;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005062 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005063 list_for_each_entry(w, &proc->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005064 if (w->type == BINDER_WORK_TRANSACTION)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005065 count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005066 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005067 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005068 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005069
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005070 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005071}
5072
5073
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005074static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005075{
5076 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005077 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005078 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005079
Todd Kjos48b33212017-05-24 11:53:13 -07005080 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005081
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005082 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005083
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005084 spin_lock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005085 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005086 seq_puts(m, "dead nodes:\n");
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005087 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5088 /*
5089 * take a temporary reference on the node so it
5090 * survives and isn't removed from the list
5091 * while we print it.
5092 */
5093 node->tmp_refs++;
5094 spin_unlock(&binder_dead_nodes_lock);
5095 if (last_node)
5096 binder_put_node(last_node);
5097 binder_node_lock(node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005098 print_binder_node_nilocked(m, node);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005099 binder_node_unlock(node);
5100 last_node = node;
5101 spin_lock(&binder_dead_nodes_lock);
5102 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005103 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005104 if (last_node)
5105 binder_put_node(last_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005106
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005107 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005108 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005109 print_binder_proc(m, proc, 1);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005110 mutex_unlock(&binder_procs_lock);
Todd Kjos48b33212017-05-24 11:53:13 -07005111 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005112 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005113}
5114
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005115static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005116{
5117 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005118
Todd Kjos48b33212017-05-24 11:53:13 -07005119 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005120
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005121 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005122
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005123 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005124
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005125 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005126 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005127 print_binder_proc_stats(m, proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005128 mutex_unlock(&binder_procs_lock);
Todd Kjos48b33212017-05-24 11:53:13 -07005129 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005130 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005131}
5132
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005133static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005134{
5135 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005136
Todd Kjos48b33212017-05-24 11:53:13 -07005137 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005138
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005139 seq_puts(m, "binder transactions:\n");
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005140 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005141 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005142 print_binder_proc(m, proc, 0);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005143 mutex_unlock(&binder_procs_lock);
Todd Kjos48b33212017-05-24 11:53:13 -07005144 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005145 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005146}
5147
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005148static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005149{
Riley Andrews83050a42016-02-09 21:05:33 -08005150 struct binder_proc *itr;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005151 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005152
Todd Kjos48b33212017-05-24 11:53:13 -07005153 binder_lock(__func__);
Riley Andrews83050a42016-02-09 21:05:33 -08005154
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005155 mutex_lock(&binder_procs_lock);
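	/*
	 * A process may have opened more than one binder device (or the
	 * same device several times), so print every proc that matches
	 * the pid instead of stopping at the first hit.
	 */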
Riley Andrews83050a42016-02-09 21:05:33 -08005156 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005157 if (itr->pid == pid) {
5158 seq_puts(m, "binder proc state:\n");
5159 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08005160 }
5161 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005162 mutex_unlock(&binder_procs_lock);
5163
Todd Kjos48b33212017-05-24 11:53:13 -07005164 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005165 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005166}
5167
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005168static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005169 struct binder_transaction_log_entry *e)
5170{
Todd Kjos1cfe6272017-05-24 13:33:28 -07005171 int debug_id = READ_ONCE(e->debug_id_done);
5172 /*
5173 * read barrier to guarantee debug_id_done read before
5174 * we print the log values
5175 */
5176 smp_rmb();
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005177 seq_printf(m,
Todd Kjos1cfe6272017-05-24 13:33:28 -07005178 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005179 e->debug_id, (e->call_type == 2) ? "reply" :
5180 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005181 e->from_thread, e->to_proc, e->to_thread, e->context_name,
Todd Kjose598d172017-03-22 17:19:52 -07005182 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5183 e->return_error, e->return_error_param,
5184 e->return_error_line);
Todd Kjos1cfe6272017-05-24 13:33:28 -07005185 /*
5186	 * read barrier to guarantee debug_id_done is re-read only
5187	 * after we have finished printing the fields of the entry
5188 */
5189 smp_rmb();
5190 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5191 "\n" : " (incomplete)\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005192}
5193
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005194static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005195{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005196 struct binder_transaction_log *log = m->private;
Todd Kjos1cfe6272017-05-24 13:33:28 -07005197 unsigned int log_cur = atomic_read(&log->cur);
5198 unsigned int count;
5199 unsigned int cur;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005200 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005201
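	/*
	 * log->cur is a free-running index, initialized to ~0U so the
	 * first entry lands in slot 0. Once the buffer has wrapped (or
	 * log->full is set), dump all ARRAY_SIZE(log->entry) slots
	 * starting at the oldest; otherwise dump slots 0..log_cur.
	 */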
Todd Kjos1cfe6272017-05-24 13:33:28 -07005202 count = log_cur + 1;
5203 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5204 0 : count % ARRAY_SIZE(log->entry);
5205 if (count > ARRAY_SIZE(log->entry) || log->full)
5206 count = ARRAY_SIZE(log->entry);
5207 for (i = 0; i < count; i++) {
5208 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5209
5210 print_binder_transaction_log_entry(m, &log->entry[index]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005211 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005212 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005213}
5214
5215static const struct file_operations binder_fops = {
5216 .owner = THIS_MODULE,
5217 .poll = binder_poll,
5218 .unlocked_ioctl = binder_ioctl,
Arve Hjønnevågda498892014-02-21 14:40:26 -08005219 .compat_ioctl = binder_ioctl,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005220 .mmap = binder_mmap,
5221 .open = binder_open,
5222 .flush = binder_flush,
5223 .release = binder_release,
5224};
5225
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005226BINDER_DEBUG_ENTRY(state);
5227BINDER_DEBUG_ENTRY(stats);
5228BINDER_DEBUG_ENTRY(transactions);
5229BINDER_DEBUG_ENTRY(transaction_log);
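/*
 * With debugfs mounted (conventionally at /sys/kernel/debug), the files
 * created in binder_init() below appear as
 * /sys/kernel/debug/binder/{state,stats,transactions,transaction_log,
 * failed_transaction_log}.
 */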
5230
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005231static int __init init_binder_device(const char *name)
5232{
5233 int ret;
5234 struct binder_device *binder_device;
5235
5236 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5237 if (!binder_device)
5238 return -ENOMEM;
5239
5240 binder_device->miscdev.fops = &binder_fops;
5241 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5242 binder_device->miscdev.name = name;
5243
5244 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5245 binder_device->context.name = name;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005246 mutex_init(&binder_device->context.context_mgr_node_lock);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005247
5248 ret = misc_register(&binder_device->miscdev);
5249 if (ret < 0) {
5250 kfree(binder_device);
5251 return ret;
5252 }
5253
5254 hlist_add_head(&binder_device->hlist, &binder_devices);
5255
5256 return ret;
5257}
5258
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005259static int __init binder_init(void)
5260{
5261 int ret;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005262 char *device_name, *device_names;
5263 struct binder_device *device;
5264 struct hlist_node *tmp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005265
Todd Kjos1cfe6272017-05-24 13:33:28 -07005266 atomic_set(&binder_transaction_log.cur, ~0U);
5267 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5268
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005269 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5270 if (binder_debugfs_dir_entry_root)
5271 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5272 binder_debugfs_dir_entry_root);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005273
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005274 if (binder_debugfs_dir_entry_root) {
5275 debugfs_create_file("state",
5276 S_IRUGO,
5277 binder_debugfs_dir_entry_root,
5278 NULL,
5279 &binder_state_fops);
5280 debugfs_create_file("stats",
5281 S_IRUGO,
5282 binder_debugfs_dir_entry_root,
5283 NULL,
5284 &binder_stats_fops);
5285 debugfs_create_file("transactions",
5286 S_IRUGO,
5287 binder_debugfs_dir_entry_root,
5288 NULL,
5289 &binder_transactions_fops);
5290 debugfs_create_file("transaction_log",
5291 S_IRUGO,
5292 binder_debugfs_dir_entry_root,
5293 &binder_transaction_log,
5294 &binder_transaction_log_fops);
5295 debugfs_create_file("failed_transaction_log",
5296 S_IRUGO,
5297 binder_debugfs_dir_entry_root,
5298 &binder_transaction_log_failed,
5299 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005300 }
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005301
5302 /*
5303	 * Copy the module parameter string, because we don't want to
5304 * tokenize it in-place.
5305 */
5306 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5307 if (!device_names) {
5308 ret = -ENOMEM;
5309 goto err_alloc_device_names_failed;
5310 }
5311 strcpy(device_names, binder_devices_param);
5312
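	/*
	 * binder_devices_param holds a comma-separated list of device
	 * names; each name gets its own misc device and binder context
	 * (e.g. "binder,hwbinder,vndbinder" on a typical Android
	 * configuration).
	 */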
5313 while ((device_name = strsep(&device_names, ","))) {
5314 ret = init_binder_device(device_name);
5315 if (ret)
5316 goto err_init_binder_device_failed;
5317 }
5318
5319 return ret;
5320
5321err_init_binder_device_failed:
5322 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5323 misc_deregister(&device->miscdev);
5324 hlist_del(&device->hlist);
5325 kfree(device);
5326 }
5327err_alloc_device_names_failed:
5328 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5329
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005330 return ret;
5331}
5332
5333device_initcall(binder_init);
5334
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005335#define CREATE_TRACE_POINTS
5336#include "binder_trace.h"
5337
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005338MODULE_LICENSE("GPL v2");