blob: dc7a4f5171c861e5aa75e587263aa8e1c4066d36 [file] [log] [blame]
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Todd Kjosfc7a7e22017-05-29 16:44:24 -070018/*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
Martijn Coenen22d64e4322017-06-02 11:15:44 -070031 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
Todd Kjosfc7a7e22017-05-29 16:44:24 -070035 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock held on entry indicate which lock
42 * in the suffix of the function name:
43 *
44 * foo_olocked() : requires node->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
51
Anmol Sarma56b468f2012-10-30 22:35:43 +053052#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090054#include <asm/cacheflush.h>
55#include <linux/fdtable.h>
56#include <linux/file.h>
Colin Crosse2610b22013-05-06 23:50:15 +000057#include <linux/freezer.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090058#include <linux/fs.h>
59#include <linux/list.h>
60#include <linux/miscdevice.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090061#include <linux/module.h>
62#include <linux/mutex.h>
63#include <linux/nsproxy.h>
64#include <linux/poll.h>
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070065#include <linux/debugfs.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090066#include <linux/rbtree.h>
67#include <linux/sched.h>
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070068#include <linux/seq_file.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090069#include <linux/uaccess.h>
Eric W. Biederman17cf22c2010-03-02 14:51:53 -080070#include <linux/pid_namespace.h>
Stephen Smalley79af7302015-01-21 10:54:10 -050071#include <linux/security.h>
Todd Kjosfc7a7e22017-05-29 16:44:24 -070072#include <linux/spinlock.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090073
Greg Kroah-Hartman9246a4a2014-10-16 15:26:51 +020074#include <uapi/linux/android/binder.h>
Todd Kjosb9341022016-10-10 10:40:53 -070075#include "binder_alloc.h"
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -070076#include "binder_trace.h"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090077
Todd Kjos8d9f6f32016-10-17 12:33:15 -070078static HLIST_HEAD(binder_deferred_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090079static DEFINE_MUTEX(binder_deferred_lock);
80
Martijn Coenen6b7c7122016-09-30 16:08:09 +020081static HLIST_HEAD(binder_devices);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090082static HLIST_HEAD(binder_procs);
Todd Kjos8d9f6f32016-10-17 12:33:15 -070083static DEFINE_MUTEX(binder_procs_lock);
84
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090085static HLIST_HEAD(binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -070086static DEFINE_SPINLOCK(binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090087
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070088static struct dentry *binder_debugfs_dir_entry_root;
89static struct dentry *binder_debugfs_dir_entry_proc;
Todd Kjosc4bd08b2017-05-25 10:56:00 -070090static atomic_t binder_last_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090091
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070092#define BINDER_DEBUG_ENTRY(name) \
93static int binder_##name##_open(struct inode *inode, struct file *file) \
94{ \
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070095 return single_open(file, binder_##name##_show, inode->i_private); \
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070096} \
97\
98static const struct file_operations binder_##name##_fops = { \
99 .owner = THIS_MODULE, \
100 .open = binder_##name##_open, \
101 .read = seq_read, \
102 .llseek = seq_lseek, \
103 .release = single_release, \
104}
105
106static int binder_proc_show(struct seq_file *m, void *unused);
107BINDER_DEBUG_ENTRY(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900108
109/* This is only defined in include/asm-arm/sizes.h */
110#ifndef SZ_1K
111#define SZ_1K 0x400
112#endif
113
114#ifndef SZ_4M
115#define SZ_4M 0x400000
116#endif
117
118#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
119
120#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
121
122enum {
123 BINDER_DEBUG_USER_ERROR = 1U << 0,
124 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
125 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
126 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
127 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
128 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
129 BINDER_DEBUG_READ_WRITE = 1U << 6,
130 BINDER_DEBUG_USER_REFS = 1U << 7,
131 BINDER_DEBUG_THREADS = 1U << 8,
132 BINDER_DEBUG_TRANSACTION = 1U << 9,
133 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
134 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
135 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
Todd Kjosd325d372016-10-10 10:40:53 -0700136 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700137 BINDER_DEBUG_SPINLOCKS = 1U << 14,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900138};
139static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
140 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
Harsh Shandilya174562a2017-12-22 19:37:02 +0530141module_param_named(debug_mask, binder_debug_mask, uint, 0644);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900142
Martijn Coenen6b7c7122016-09-30 16:08:09 +0200143static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
144module_param_named(devices, binder_devices_param, charp, S_IRUGO);
145
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900146static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
147static int binder_stop_on_user_error;
148
149static int binder_set_stop_on_user_error(const char *val,
Kees Cook24da2c82017-10-17 19:04:42 -0700150 const struct kernel_param *kp)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900151{
152 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +0900153
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900154 ret = param_set_int(val, kp);
155 if (binder_stop_on_user_error < 2)
156 wake_up(&binder_user_error_wait);
157 return ret;
158}
159module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
Harsh Shandilya174562a2017-12-22 19:37:02 +0530160 param_get_int, &binder_stop_on_user_error, 0644);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900161
162#define binder_debug(mask, x...) \
163 do { \
164 if (binder_debug_mask & mask) \
Sherwin Soltani258767f2012-06-26 02:00:30 -0400165 pr_info(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900166 } while (0)
167
168#define binder_user_error(x...) \
169 do { \
170 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
Sherwin Soltani258767f2012-06-26 02:00:30 -0400171 pr_info(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900172 if (binder_stop_on_user_error) \
173 binder_stop_on_user_error = 2; \
174 } while (0)
175
Martijn Coenen00c80372016-07-13 12:06:49 +0200176#define to_flat_binder_object(hdr) \
177 container_of(hdr, struct flat_binder_object, hdr)
178
179#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
180
Martijn Coenen5a6da532016-09-30 14:10:07 +0200181#define to_binder_buffer_object(hdr) \
182 container_of(hdr, struct binder_buffer_object, hdr)
183
Martijn Coenene3e0f4802016-10-18 13:58:55 +0200184#define to_binder_fd_array_object(hdr) \
185 container_of(hdr, struct binder_fd_array_object, hdr)
186
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900187enum binder_stat_types {
188 BINDER_STAT_PROC,
189 BINDER_STAT_THREAD,
190 BINDER_STAT_NODE,
191 BINDER_STAT_REF,
192 BINDER_STAT_DEATH,
193 BINDER_STAT_TRANSACTION,
194 BINDER_STAT_TRANSACTION_COMPLETE,
195 BINDER_STAT_COUNT
196};
197
198struct binder_stats {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -0700199 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
200 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
201 atomic_t obj_created[BINDER_STAT_COUNT];
202 atomic_t obj_deleted[BINDER_STAT_COUNT];
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900203};
204
205static struct binder_stats binder_stats;
206
207static inline void binder_stats_deleted(enum binder_stat_types type)
208{
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -0700209 atomic_inc(&binder_stats.obj_deleted[type]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900210}
211
212static inline void binder_stats_created(enum binder_stat_types type)
213{
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -0700214 atomic_inc(&binder_stats.obj_created[type]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900215}
216
217struct binder_transaction_log_entry {
218 int debug_id;
Todd Kjos1cfe6272017-05-24 13:33:28 -0700219 int debug_id_done;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900220 int call_type;
221 int from_proc;
222 int from_thread;
223 int target_handle;
224 int to_proc;
225 int to_thread;
226 int to_node;
227 int data_size;
228 int offsets_size;
Todd Kjose598d172017-03-22 17:19:52 -0700229 int return_error_line;
230 uint32_t return_error;
231 uint32_t return_error_param;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +0200232 const char *context_name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900233};
234struct binder_transaction_log {
Todd Kjos1cfe6272017-05-24 13:33:28 -0700235 atomic_t cur;
236 bool full;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900237 struct binder_transaction_log_entry entry[32];
238};
239static struct binder_transaction_log binder_transaction_log;
240static struct binder_transaction_log binder_transaction_log_failed;
241
242static struct binder_transaction_log_entry *binder_transaction_log_add(
243 struct binder_transaction_log *log)
244{
245 struct binder_transaction_log_entry *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -0700246 unsigned int cur = atomic_inc_return(&log->cur);
Seunghun Lee10f62862014-05-01 01:30:23 +0900247
Todd Kjos1cfe6272017-05-24 13:33:28 -0700248 if (cur >= ARRAY_SIZE(log->entry))
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -0600249 log->full = true;
Todd Kjos1cfe6272017-05-24 13:33:28 -0700250 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
251 WRITE_ONCE(e->debug_id_done, 0);
252 /*
253 * write-barrier to synchronize access to e->debug_id_done.
254 * We make sure the initialized 0 value is seen before
255 * memset() other fields are zeroed by memset.
256 */
257 smp_wmb();
258 memset(e, 0, sizeof(*e));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900259 return e;
260}
261
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200262struct binder_context {
263 struct binder_node *binder_context_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -0700264 struct mutex context_mgr_node_lock;
265
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200266 kuid_t binder_context_mgr_uid;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +0200267 const char *name;
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200268};
269
Martijn Coenen6b7c7122016-09-30 16:08:09 +0200270struct binder_device {
271 struct hlist_node hlist;
272 struct miscdevice miscdev;
273 struct binder_context context;
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200274};
275
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700276/**
277 * struct binder_work - work enqueued on a worklist
278 * @entry: node enqueued on list
279 * @type: type of work to be performed
280 *
281 * There are separate work lists for proc, thread, and node (async).
282 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900283struct binder_work {
284 struct list_head entry;
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700285
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900286 enum {
287 BINDER_WORK_TRANSACTION = 1,
288 BINDER_WORK_TRANSACTION_COMPLETE,
Todd Kjos858b8da2017-04-21 17:35:12 -0700289 BINDER_WORK_RETURN_ERROR,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900290 BINDER_WORK_NODE,
291 BINDER_WORK_DEAD_BINDER,
292 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
293 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
294 } type;
295};
296
Todd Kjos858b8da2017-04-21 17:35:12 -0700297struct binder_error {
298 struct binder_work work;
299 uint32_t cmd;
300};
301
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700302/**
303 * struct binder_node - binder node bookkeeping
304 * @debug_id: unique ID for debugging
305 * (invariant after initialized)
306 * @lock: lock for node fields
307 * @work: worklist element for node work
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700308 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700309 * @rb_node: element for proc->nodes tree
Todd Kjos425d23f2017-06-12 12:07:26 -0700310 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700311 * @dead_node: element for binder_dead_nodes list
312 * (protected by binder_dead_nodes_lock)
313 * @proc: binder_proc that owns this node
314 * (invariant after initialized)
315 * @refs: list of references on this node
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700316 * (protected by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700317 * @internal_strong_refs: used to take strong references when
318 * initiating a transaction
Todd Kjose7f23ed2017-03-21 13:06:01 -0700319 * (protected by @proc->inner_lock if @proc
320 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700321 * @local_weak_refs: weak user refs from local process
Todd Kjose7f23ed2017-03-21 13:06:01 -0700322 * (protected by @proc->inner_lock if @proc
323 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700324 * @local_strong_refs: strong user refs from local process
Todd Kjose7f23ed2017-03-21 13:06:01 -0700325 * (protected by @proc->inner_lock if @proc
326 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700327 * @tmp_refs: temporary kernel refs
Todd Kjose7f23ed2017-03-21 13:06:01 -0700328 * (protected by @proc->inner_lock while @proc
329 * is valid, and by binder_dead_nodes_lock
330 * if @proc is NULL. During inc/dec and node release
331 * it is also protected by @lock to provide safety
332 * as the node dies and @proc becomes NULL)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700333 * @ptr: userspace pointer for node
334 * (invariant, no lock needed)
335 * @cookie: userspace cookie for node
336 * (invariant, no lock needed)
337 * @has_strong_ref: userspace notified of strong ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700338 * (protected by @proc->inner_lock if @proc
339 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700340 * @pending_strong_ref: userspace has acked notification of strong ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700341 * (protected by @proc->inner_lock if @proc
342 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700343 * @has_weak_ref: userspace notified of weak ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700344 * (protected by @proc->inner_lock if @proc
345 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700346 * @pending_weak_ref: userspace has acked notification of weak ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700347 * (protected by @proc->inner_lock if @proc
348 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700349 * @has_async_transaction: async transaction to node in progress
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700350 * (protected by @lock)
Martijn Coenen6aac9792017-06-07 09:29:14 -0700351 * @sched_policy: minimum scheduling policy for node
352 * (invariant after initialized)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700353 * @accept_fds: file descriptor operations supported for node
354 * (invariant after initialized)
355 * @min_priority: minimum scheduling priority
356 * (invariant after initialized)
Martijn Coenenc46810c2017-06-23 10:13:43 -0700357 * @inherit_rt: inherit RT scheduling policy from caller
358 * (invariant after initialized)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700359 * @async_todo: list of async work items
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700360 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700361 *
362 * Bookkeeping structure for binder nodes.
363 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900364struct binder_node {
365 int debug_id;
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700366 spinlock_t lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900367 struct binder_work work;
368 union {
369 struct rb_node rb_node;
370 struct hlist_node dead_node;
371 };
372 struct binder_proc *proc;
373 struct hlist_head refs;
374 int internal_strong_refs;
375 int local_weak_refs;
376 int local_strong_refs;
Todd Kjosf22abc72017-05-09 11:08:05 -0700377 int tmp_refs;
Arve Hjønnevågda498892014-02-21 14:40:26 -0800378 binder_uintptr_t ptr;
379 binder_uintptr_t cookie;
Todd Kjose7f23ed2017-03-21 13:06:01 -0700380 struct {
381 /*
382 * bitfield elements protected by
383 * proc inner_lock
384 */
385 u8 has_strong_ref:1;
386 u8 pending_strong_ref:1;
387 u8 has_weak_ref:1;
388 u8 pending_weak_ref:1;
389 };
390 struct {
391 /*
392 * invariant after initialization
393 */
Martijn Coenen6aac9792017-06-07 09:29:14 -0700394 u8 sched_policy:2;
Martijn Coenenc46810c2017-06-23 10:13:43 -0700395 u8 inherit_rt:1;
Todd Kjose7f23ed2017-03-21 13:06:01 -0700396 u8 accept_fds:1;
397 u8 min_priority;
398 };
399 bool has_async_transaction;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900400 struct list_head async_todo;
401};
402
403struct binder_ref_death {
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700404 /**
405 * @work: worklist element for death notifications
406 * (protected by inner_lock of the proc that
407 * this ref belongs to)
408 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900409 struct binder_work work;
Arve Hjønnevågda498892014-02-21 14:40:26 -0800410 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900411};
412
Todd Kjosb0117bb2017-05-08 09:16:27 -0700413/**
414 * struct binder_ref_data - binder_ref counts and id
415 * @debug_id: unique ID for the ref
416 * @desc: unique userspace handle for ref
417 * @strong: strong ref count (debugging only if not locked)
418 * @weak: weak ref count (debugging only if not locked)
419 *
420 * Structure to hold ref count and ref id information. Since
421 * the actual ref can only be accessed with a lock, this structure
422 * is used to return information about the ref to callers of
423 * ref inc/dec functions.
424 */
425struct binder_ref_data {
426 int debug_id;
427 uint32_t desc;
428 int strong;
429 int weak;
430};
431
432/**
433 * struct binder_ref - struct to track references on nodes
434 * @data: binder_ref_data containing id, handle, and current refcounts
435 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
436 * @rb_node_node: node for lookup by @node in proc's rb_tree
437 * @node_entry: list entry for node->refs list in target node
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700438 * (protected by @node->lock)
Todd Kjosb0117bb2017-05-08 09:16:27 -0700439 * @proc: binder_proc containing ref
440 * @node: binder_node of target node. When cleaning up a
441 * ref for deletion in binder_cleanup_ref, a non-NULL
442 * @node indicates the node must be freed
443 * @death: pointer to death notification (ref_death) if requested
Martijn Coenenf9eac642017-05-22 11:26:23 -0700444 * (protected by @node->lock)
Todd Kjosb0117bb2017-05-08 09:16:27 -0700445 *
446 * Structure to track references from procA to target node (on procB). This
447 * structure is unsafe to access without holding @proc->outer_lock.
448 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900449struct binder_ref {
450 /* Lookups needed: */
451 /* node + proc => ref (transaction) */
452 /* desc + proc => ref (transaction, inc/dec ref) */
453 /* node => refs + procs (proc exit) */
Todd Kjosb0117bb2017-05-08 09:16:27 -0700454 struct binder_ref_data data;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900455 struct rb_node rb_node_desc;
456 struct rb_node rb_node_node;
457 struct hlist_node node_entry;
458 struct binder_proc *proc;
459 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900460 struct binder_ref_death *death;
461};
462
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900463enum binder_deferred_state {
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200464 BINDER_DEFERRED_PUT_FILES = 0x01,
465 BINDER_DEFERRED_FLUSH = 0x02,
466 BINDER_DEFERRED_RELEASE = 0x04,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900467};
468
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700469/**
Martijn Coenen57b2ac62017-06-06 17:04:42 -0700470 * struct binder_priority - scheduler policy and priority
471 * @sched_policy scheduler policy
472 * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
473 *
474 * The binder driver supports inheriting the following scheduler policies:
475 * SCHED_NORMAL
476 * SCHED_BATCH
477 * SCHED_FIFO
478 * SCHED_RR
479 */
480struct binder_priority {
481 unsigned int sched_policy;
482 int prio;
483};
484
485/**
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700486 * struct binder_proc - binder process bookkeeping
487 * @proc_node: element for binder_procs list
488 * @threads: rbtree of binder_threads in this proc
Todd Kjosb4827902017-05-25 15:52:17 -0700489 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700490 * @nodes: rbtree of binder nodes associated with
491 * this proc ordered by node->ptr
Todd Kjos425d23f2017-06-12 12:07:26 -0700492 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700493 * @refs_by_desc: rbtree of refs ordered by ref->desc
Todd Kjos5346bf32016-10-20 16:43:34 -0700494 * (protected by @outer_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700495 * @refs_by_node: rbtree of refs ordered by ref->node
Todd Kjos5346bf32016-10-20 16:43:34 -0700496 * (protected by @outer_lock)
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700497 * @waiting_threads: threads currently waiting for proc work
498 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700499 * @pid PID of group_leader of process
500 * (invariant after initialized)
501 * @tsk task_struct for group_leader of process
502 * (invariant after initialized)
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200503 * @files files_struct for process
504 * (invariant after initialized)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700505 * @deferred_work_node: element for binder_deferred_list
506 * (protected by binder_deferred_lock)
507 * @deferred_work: bitmap of deferred work to perform
508 * (protected by binder_deferred_lock)
509 * @is_dead: process is dead and awaiting free
510 * when outstanding transactions are cleaned up
Todd Kjosb4827902017-05-25 15:52:17 -0700511 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700512 * @todo: list of work for this process
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700513 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700514 * @stats: per-process binder statistics
515 * (atomics, no lock needed)
516 * @delivered_death: list of delivered death notification
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700517 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700518 * @max_threads: cap on number of binder threads
Todd Kjosd600e902017-05-25 17:35:02 -0700519 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700520 * @requested_threads: number of binder threads requested but not
521 * yet started. In current implementation, can
522 * only be 0 or 1.
Todd Kjosd600e902017-05-25 17:35:02 -0700523 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700524 * @requested_threads_started: number binder threads started
Todd Kjosd600e902017-05-25 17:35:02 -0700525 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700526 * @tmp_ref: temporary reference to indicate proc is in use
Todd Kjosb4827902017-05-25 15:52:17 -0700527 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700528 * @default_priority: default scheduler priority
529 * (invariant after initialized)
530 * @debugfs_entry: debugfs node
531 * @alloc: binder allocator bookkeeping
532 * @context: binder_context for this proc
533 * (invariant after initialized)
534 * @inner_lock: can nest under outer_lock and/or node lock
535 * @outer_lock: no nesting under innor or node lock
536 * Lock order: 1) outer, 2) node, 3) inner
537 *
538 * Bookkeeping structure for binder processes
539 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900540struct binder_proc {
541 struct hlist_node proc_node;
542 struct rb_root threads;
543 struct rb_root nodes;
544 struct rb_root refs_by_desc;
545 struct rb_root refs_by_node;
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700546 struct list_head waiting_threads;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900547 int pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900548 struct task_struct *tsk;
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200549 struct files_struct *files;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900550 struct hlist_node deferred_work_node;
551 int deferred_work;
Todd Kjos2f993e22017-05-12 14:42:55 -0700552 bool is_dead;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900553
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900554 struct list_head todo;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900555 struct binder_stats stats;
556 struct list_head delivered_death;
557 int max_threads;
558 int requested_threads;
559 int requested_threads_started;
Todd Kjos2f993e22017-05-12 14:42:55 -0700560 int tmp_ref;
Martijn Coenen57b2ac62017-06-06 17:04:42 -0700561 struct binder_priority default_priority;
Arve Hjønnevåg16b66552009-04-28 20:57:50 -0700562 struct dentry *debugfs_entry;
Todd Kjosf85d2292016-10-10 10:39:59 -0700563 struct binder_alloc alloc;
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200564 struct binder_context *context;
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700565 spinlock_t inner_lock;
566 spinlock_t outer_lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900567};
568
569enum {
570 BINDER_LOOPER_STATE_REGISTERED = 0x01,
571 BINDER_LOOPER_STATE_ENTERED = 0x02,
572 BINDER_LOOPER_STATE_EXITED = 0x04,
573 BINDER_LOOPER_STATE_INVALID = 0x08,
574 BINDER_LOOPER_STATE_WAITING = 0x10,
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700575 BINDER_LOOPER_STATE_POLL = 0x20,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900576};
577
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700578/**
579 * struct binder_thread - binder thread bookkeeping
580 * @proc: binder process for this thread
581 * (invariant after initialization)
582 * @rb_node: element for proc->threads rbtree
Todd Kjosb4827902017-05-25 15:52:17 -0700583 * (protected by @proc->inner_lock)
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700584 * @waiting_thread_node: element for @proc->waiting_threads list
585 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700586 * @pid: PID for this thread
587 * (invariant after initialization)
588 * @looper: bitmap of looping state
589 * (only accessed by this thread)
590 * @looper_needs_return: looping thread needs to exit driver
591 * (no lock needed)
592 * @transaction_stack: stack of in-progress transactions for this thread
Martijn Coenen995a36e2017-06-02 13:36:52 -0700593 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700594 * @todo: list of work to do for this thread
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700595 * (protected by @proc->inner_lock)
Martijn Coenen1af61802017-10-19 15:04:46 +0200596 * @process_todo: whether work in @todo should be processed
597 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700598 * @return_error: transaction errors reported by this thread
599 * (only accessed by this thread)
600 * @reply_error: transaction errors reported by target thread
Martijn Coenen995a36e2017-06-02 13:36:52 -0700601 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700602 * @wait: wait queue for thread work
603 * @stats: per-thread statistics
604 * (atomics, no lock needed)
605 * @tmp_ref: temporary reference to indicate thread is in use
606 * (atomic since @proc->inner_lock cannot
607 * always be acquired)
608 * @is_dead: thread is dead and awaiting free
609 * when outstanding transactions are cleaned up
Todd Kjosb4827902017-05-25 15:52:17 -0700610 * (protected by @proc->inner_lock)
Martijn Coenen07a30fe2017-06-07 10:02:12 -0700611 * @task: struct task_struct for this thread
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700612 *
613 * Bookkeeping structure for binder threads.
614 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900615struct binder_thread {
616 struct binder_proc *proc;
617 struct rb_node rb_node;
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700618 struct list_head waiting_thread_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900619 int pid;
Todd Kjos6798e6d2017-01-06 14:19:25 -0800620 int looper; /* only modified by this thread */
621 bool looper_need_return; /* can be written by other thread */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900622 struct binder_transaction *transaction_stack;
623 struct list_head todo;
Martijn Coenen1af61802017-10-19 15:04:46 +0200624 bool process_todo;
Todd Kjos858b8da2017-04-21 17:35:12 -0700625 struct binder_error return_error;
626 struct binder_error reply_error;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900627 wait_queue_head_t wait;
628 struct binder_stats stats;
Todd Kjos2f993e22017-05-12 14:42:55 -0700629 atomic_t tmp_ref;
630 bool is_dead;
Martijn Coenen07a30fe2017-06-07 10:02:12 -0700631 struct task_struct *task;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900632};
633
634struct binder_transaction {
635 int debug_id;
636 struct binder_work work;
637 struct binder_thread *from;
638 struct binder_transaction *from_parent;
639 struct binder_proc *to_proc;
640 struct binder_thread *to_thread;
641 struct binder_transaction *to_parent;
642 unsigned need_reply:1;
643 /* unsigned is_dead:1; */ /* not used at the moment */
644
645 struct binder_buffer *buffer;
646 unsigned int code;
647 unsigned int flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -0700648 struct binder_priority priority;
649 struct binder_priority saved_priority;
Martijn Coenen07a30fe2017-06-07 10:02:12 -0700650 bool set_priority_called;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -0600651 kuid_t sender_euid;
Todd Kjos2f993e22017-05-12 14:42:55 -0700652 /**
653 * @lock: protects @from, @to_proc, and @to_thread
654 *
655 * @from, @to_proc, and @to_thread can be set to NULL
656 * during thread teardown
657 */
658 spinlock_t lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900659};
660
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700661/**
662 * binder_proc_lock() - Acquire outer lock for given binder_proc
663 * @proc: struct binder_proc to acquire
664 *
665 * Acquires proc->outer_lock. Used to protect binder_ref
666 * structures associated with the given proc.
667 */
668#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
669static void
670_binder_proc_lock(struct binder_proc *proc, int line)
671{
672 binder_debug(BINDER_DEBUG_SPINLOCKS,
673 "%s: line=%d\n", __func__, line);
674 spin_lock(&proc->outer_lock);
675}
676
677/**
678 * binder_proc_unlock() - Release spinlock for given binder_proc
679 * @proc: struct binder_proc to acquire
680 *
681 * Release lock acquired via binder_proc_lock()
682 */
683#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
684static void
685_binder_proc_unlock(struct binder_proc *proc, int line)
686{
687 binder_debug(BINDER_DEBUG_SPINLOCKS,
688 "%s: line=%d\n", __func__, line);
689 spin_unlock(&proc->outer_lock);
690}
691
692/**
693 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
694 * @proc: struct binder_proc to acquire
695 *
696 * Acquires proc->inner_lock. Used to protect todo lists
697 */
698#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
699static void
700_binder_inner_proc_lock(struct binder_proc *proc, int line)
701{
702 binder_debug(BINDER_DEBUG_SPINLOCKS,
703 "%s: line=%d\n", __func__, line);
704 spin_lock(&proc->inner_lock);
705}
706
707/**
708 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
709 * @proc: struct binder_proc to acquire
710 *
711 * Release lock acquired via binder_inner_proc_lock()
712 */
713#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
714static void
715_binder_inner_proc_unlock(struct binder_proc *proc, int line)
716{
717 binder_debug(BINDER_DEBUG_SPINLOCKS,
718 "%s: line=%d\n", __func__, line);
719 spin_unlock(&proc->inner_lock);
720}
721
722/**
723 * binder_node_lock() - Acquire spinlock for given binder_node
724 * @node: struct binder_node to acquire
725 *
726 * Acquires node->lock. Used to protect binder_node fields
727 */
728#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
729static void
730_binder_node_lock(struct binder_node *node, int line)
731{
732 binder_debug(BINDER_DEBUG_SPINLOCKS,
733 "%s: line=%d\n", __func__, line);
734 spin_lock(&node->lock);
735}
736
737/**
738 * binder_node_unlock() - Release spinlock for given binder_proc
739 * @node: struct binder_node to acquire
740 *
741 * Release lock acquired via binder_node_lock()
742 */
743#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
744static void
745_binder_node_unlock(struct binder_node *node, int line)
746{
747 binder_debug(BINDER_DEBUG_SPINLOCKS,
748 "%s: line=%d\n", __func__, line);
749 spin_unlock(&node->lock);
750}
751
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700752/**
753 * binder_node_inner_lock() - Acquire node and inner locks
754 * @node: struct binder_node to acquire
755 *
756 * Acquires node->lock. If node->proc also acquires
757 * proc->inner_lock. Used to protect binder_node fields
758 */
759#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
760static void
761_binder_node_inner_lock(struct binder_node *node, int line)
762{
763 binder_debug(BINDER_DEBUG_SPINLOCKS,
764 "%s: line=%d\n", __func__, line);
765 spin_lock(&node->lock);
766 if (node->proc)
767 binder_inner_proc_lock(node->proc);
768}
769
770/**
771 * binder_node_unlock() - Release node and inner locks
772 * @node: struct binder_node to acquire
773 *
774 * Release lock acquired via binder_node_lock()
775 */
776#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
777static void
778_binder_node_inner_unlock(struct binder_node *node, int line)
779{
780 struct binder_proc *proc = node->proc;
781
782 binder_debug(BINDER_DEBUG_SPINLOCKS,
783 "%s: line=%d\n", __func__, line);
784 if (proc)
785 binder_inner_proc_unlock(proc);
786 spin_unlock(&node->lock);
787}
788
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700789static bool binder_worklist_empty_ilocked(struct list_head *list)
790{
791 return list_empty(list);
792}
793
794/**
795 * binder_worklist_empty() - Check if no items on the work list
796 * @proc: binder_proc associated with list
797 * @list: list to check
798 *
799 * Return: true if there are no items on list, else false
800 */
801static bool binder_worklist_empty(struct binder_proc *proc,
802 struct list_head *list)
803{
804 bool ret;
805
806 binder_inner_proc_lock(proc);
807 ret = binder_worklist_empty_ilocked(list);
808 binder_inner_proc_unlock(proc);
809 return ret;
810}
811
Martijn Coenen1af61802017-10-19 15:04:46 +0200812/**
813 * binder_enqueue_work_ilocked() - Add an item to the work list
814 * @work: struct binder_work to add to list
815 * @target_list: list to add work to
816 *
817 * Adds the work to the specified list. Asserts that work
818 * is not already on a list.
819 *
820 * Requires the proc->inner_lock to be held.
821 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700822static void
823binder_enqueue_work_ilocked(struct binder_work *work,
824 struct list_head *target_list)
825{
826 BUG_ON(target_list == NULL);
827 BUG_ON(work->entry.next && !list_empty(&work->entry));
828 list_add_tail(&work->entry, target_list);
829}
830
831/**
Martijn Coenendac2e9c2017-11-13 09:55:21 +0100832 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
Martijn Coenen1af61802017-10-19 15:04:46 +0200833 * @thread: thread to queue work to
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700834 * @work: struct binder_work to add to list
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700835 *
Martijn Coenen1af61802017-10-19 15:04:46 +0200836 * Adds the work to the todo list of the thread. Doesn't set the process_todo
837 * flag, which means that (if it wasn't already set) the thread will go to
838 * sleep without handling this work when it calls read.
839 *
840 * Requires the proc->inner_lock to be held.
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700841 */
842static void
Martijn Coenendac2e9c2017-11-13 09:55:21 +0100843binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
844 struct binder_work *work)
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700845{
Martijn Coenen1af61802017-10-19 15:04:46 +0200846 binder_enqueue_work_ilocked(work, &thread->todo);
847}
848
849/**
850 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
851 * @thread: thread to queue work to
852 * @work: struct binder_work to add to list
853 *
854 * Adds the work to the todo list of the thread, and enables processing
855 * of the todo queue.
856 *
857 * Requires the proc->inner_lock to be held.
858 */
859static void
860binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
861 struct binder_work *work)
862{
863 binder_enqueue_work_ilocked(work, &thread->todo);
864 thread->process_todo = true;
865}
866
867/**
868 * binder_enqueue_thread_work() - Add an item to the thread work list
869 * @thread: thread to queue work to
870 * @work: struct binder_work to add to list
871 *
872 * Adds the work to the todo list of the thread, and enables processing
873 * of the todo queue.
874 */
875static void
876binder_enqueue_thread_work(struct binder_thread *thread,
877 struct binder_work *work)
878{
879 binder_inner_proc_lock(thread->proc);
880 binder_enqueue_thread_work_ilocked(thread, work);
881 binder_inner_proc_unlock(thread->proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700882}
883
884static void
885binder_dequeue_work_ilocked(struct binder_work *work)
886{
887 list_del_init(&work->entry);
888}
889
890/**
891 * binder_dequeue_work() - Removes an item from the work list
892 * @proc: binder_proc associated with list
893 * @work: struct binder_work to remove from list
894 *
895 * Removes the specified work item from whatever list it is on.
896 * Can safely be called if work is not on any list.
897 */
898static void
899binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
900{
901 binder_inner_proc_lock(proc);
902 binder_dequeue_work_ilocked(work);
903 binder_inner_proc_unlock(proc);
904}
905
906static struct binder_work *binder_dequeue_work_head_ilocked(
907 struct list_head *list)
908{
909 struct binder_work *w;
910
911 w = list_first_entry_or_null(list, struct binder_work, entry);
912 if (w)
913 list_del_init(&w->entry);
914 return w;
915}
916
917/**
918 * binder_dequeue_work_head() - Dequeues the item at head of list
919 * @proc: binder_proc associated with list
920 * @list: list to dequeue head
921 *
922 * Removes the head of the list if there are items on the list
923 *
924 * Return: pointer dequeued binder_work, NULL if list was empty
925 */
926static struct binder_work *binder_dequeue_work_head(
927 struct binder_proc *proc,
928 struct list_head *list)
929{
930 struct binder_work *w;
931
932 binder_inner_proc_lock(proc);
933 w = binder_dequeue_work_head_ilocked(list);
934 binder_inner_proc_unlock(proc);
935 return w;
936}
937
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900938static void
939binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
Todd Kjos2f993e22017-05-12 14:42:55 -0700940static void binder_free_thread(struct binder_thread *thread);
941static void binder_free_proc(struct binder_proc *proc);
Todd Kjos425d23f2017-06-12 12:07:26 -0700942static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900943
Sachin Kamatefde99c2012-08-17 16:39:36 +0530944static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900945{
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200946 struct files_struct *files = proc->files;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900947 unsigned long rlim_cur;
948 unsigned long irqs;
949
950 if (files == NULL)
951 return -ESRCH;
952
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200953 if (!lock_task_sighand(proc->tsk, &irqs))
954 return -EMFILE;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900955
Al Virodcfadfa2012-08-12 17:27:30 -0400956 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
957 unlock_task_sighand(proc->tsk, &irqs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900958
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200959 return __alloc_fd(files, 0, rlim_cur, flags);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900960}
961
962/*
963 * copied from fd_install
964 */
965static void task_fd_install(
966 struct binder_proc *proc, unsigned int fd, struct file *file)
967{
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200968 if (proc->files)
969 __fd_install(proc->files, fd, file);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900970}
971
972/*
973 * copied from sys_close
974 */
975static long task_close_fd(struct binder_proc *proc, unsigned int fd)
976{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900977 int retval;
978
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200979 if (proc->files == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900980 return -ESRCH;
981
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200982 retval = __close_fd(proc->files, fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900983 /* can't restart close syscall because file table entry was cleared */
984 if (unlikely(retval == -ERESTARTSYS ||
985 retval == -ERESTARTNOINTR ||
986 retval == -ERESTARTNOHAND ||
987 retval == -ERESTART_RESTARTBLOCK))
988 retval = -EINTR;
989
990 return retval;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900991}
992
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700993static bool binder_has_work_ilocked(struct binder_thread *thread,
994 bool do_proc_work)
995{
Martijn Coenen1af61802017-10-19 15:04:46 +0200996 return thread->process_todo ||
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700997 thread->looper_need_return ||
998 (do_proc_work &&
999 !binder_worklist_empty_ilocked(&thread->proc->todo));
1000}
1001
1002static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
1003{
1004 bool has_work;
1005
1006 binder_inner_proc_lock(thread->proc);
1007 has_work = binder_has_work_ilocked(thread, do_proc_work);
1008 binder_inner_proc_unlock(thread->proc);
1009
1010 return has_work;
1011}
1012
1013static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1014{
1015 return !thread->transaction_stack &&
1016 binder_worklist_empty_ilocked(&thread->todo) &&
1017 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1018 BINDER_LOOPER_STATE_REGISTERED));
1019}
1020
1021static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1022 bool sync)
1023{
1024 struct rb_node *n;
1025 struct binder_thread *thread;
1026
1027 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1028 thread = rb_entry(n, struct binder_thread, rb_node);
1029 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1030 binder_available_for_proc_work_ilocked(thread)) {
1031 if (sync)
1032 wake_up_interruptible_sync(&thread->wait);
1033 else
1034 wake_up_interruptible(&thread->wait);
1035 }
1036 }
1037}
1038
Martijn Coenen053be422017-06-06 15:17:46 -07001039/**
1040 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1041 * @proc: process to select a thread from
1042 *
1043 * Note that calling this function moves the thread off the waiting_threads
1044 * list, so it can only be woken up by the caller of this function, or a
1045 * signal. Therefore, callers *should* always wake up the thread this function
1046 * returns.
1047 *
1048 * Return: If there's a thread currently waiting for process work,
1049 * returns that thread. Otherwise returns NULL.
1050 */
1051static struct binder_thread *
1052binder_select_thread_ilocked(struct binder_proc *proc)
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001053{
1054 struct binder_thread *thread;
1055
Martijn Coenened323352017-07-27 23:52:24 +02001056 assert_spin_locked(&proc->inner_lock);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001057 thread = list_first_entry_or_null(&proc->waiting_threads,
1058 struct binder_thread,
1059 waiting_thread_node);
1060
Martijn Coenen053be422017-06-06 15:17:46 -07001061 if (thread)
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001062 list_del_init(&thread->waiting_thread_node);
Martijn Coenen053be422017-06-06 15:17:46 -07001063
1064 return thread;
1065}
1066
1067/**
1068 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1069 * @proc: process to wake up a thread in
1070 * @thread: specific thread to wake-up (may be NULL)
1071 * @sync: whether to do a synchronous wake-up
1072 *
1073 * This function wakes up a thread in the @proc process.
1074 * The caller may provide a specific thread to wake-up in
1075 * the @thread parameter. If @thread is NULL, this function
1076 * will wake up threads that have called poll().
1077 *
1078 * Note that for this function to work as expected, callers
1079 * should first call binder_select_thread() to find a thread
1080 * to handle the work (if they don't have a thread already),
1081 * and pass the result into the @thread parameter.
1082 */
1083static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1084 struct binder_thread *thread,
1085 bool sync)
1086{
Martijn Coenened323352017-07-27 23:52:24 +02001087 assert_spin_locked(&proc->inner_lock);
Martijn Coenen053be422017-06-06 15:17:46 -07001088
1089 if (thread) {
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001090 if (sync)
1091 wake_up_interruptible_sync(&thread->wait);
1092 else
1093 wake_up_interruptible(&thread->wait);
1094 return;
1095 }
1096
1097 /* Didn't find a thread waiting for proc work; this can happen
1098 * in two scenarios:
1099 * 1. All threads are busy handling transactions
1100 * In that case, one of those threads should call back into
1101 * the kernel driver soon and pick up this work.
1102 * 2. Threads are using the (e)poll interface, in which case
1103 * they may be blocked on the waitqueue without having been
1104 * added to waiting_threads. For this case, we just iterate
1105 * over all threads not handling transaction work, and
1106 * wake them all up. We wake all because we don't know whether
1107 * a thread that called into (e)poll is handling non-binder
1108 * work currently.
1109 */
1110 binder_wakeup_poll_threads_ilocked(proc, sync);
1111}
1112
Martijn Coenen053be422017-06-06 15:17:46 -07001113static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1114{
1115 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1116
1117 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1118}
1119
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001120static bool is_rt_policy(int policy)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001121{
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001122 return policy == SCHED_FIFO || policy == SCHED_RR;
1123}
Seunghun Lee10f62862014-05-01 01:30:23 +09001124
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001125static bool is_fair_policy(int policy)
1126{
1127 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1128}
1129
1130static bool binder_supported_policy(int policy)
1131{
1132 return is_fair_policy(policy) || is_rt_policy(policy);
1133}
1134
1135static int to_userspace_prio(int policy, int kernel_priority)
1136{
1137 if (is_fair_policy(policy))
1138 return PRIO_TO_NICE(kernel_priority);
1139 else
1140 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1141}
1142
1143static int to_kernel_prio(int policy, int user_priority)
1144{
1145 if (is_fair_policy(policy))
1146 return NICE_TO_PRIO(user_priority);
1147 else
1148 return MAX_USER_RT_PRIO - 1 - user_priority;
1149}
1150
Martijn Coenenecd972d2017-05-26 10:48:56 -07001151static void binder_do_set_priority(struct task_struct *task,
1152 struct binder_priority desired,
1153 bool verify)
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001154{
1155 int priority; /* user-space prio value */
1156 bool has_cap_nice;
1157 unsigned int policy = desired.sched_policy;
1158
1159 if (task->policy == policy && task->normal_prio == desired.prio)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001160 return;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001161
1162 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1163
1164 priority = to_userspace_prio(policy, desired.prio);
1165
Martijn Coenenecd972d2017-05-26 10:48:56 -07001166 if (verify && is_rt_policy(policy) && !has_cap_nice) {
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001167 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1168
1169 if (max_rtprio == 0) {
1170 policy = SCHED_NORMAL;
1171 priority = MIN_NICE;
1172 } else if (priority > max_rtprio) {
1173 priority = max_rtprio;
1174 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001175 }
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001176
Martijn Coenenecd972d2017-05-26 10:48:56 -07001177 if (verify && is_fair_policy(policy) && !has_cap_nice) {
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001178 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
1179
1180 if (min_nice > MAX_NICE) {
1181 binder_user_error("%d RLIMIT_NICE not set\n",
1182 task->pid);
1183 return;
1184 } else if (priority < min_nice) {
1185 priority = min_nice;
1186 }
1187 }
1188
1189 if (policy != desired.sched_policy ||
1190 to_kernel_prio(policy, priority) != desired.prio)
1191 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1192 "%d: priority %d not allowed, using %d instead\n",
1193 task->pid, desired.prio,
1194 to_kernel_prio(policy, priority));
1195
Martijn Coenen81402ea2017-05-08 09:33:22 -07001196 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1197 to_kernel_prio(policy, priority),
1198 desired.prio);
1199
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001200 /* Set the actual priority */
1201 if (task->policy != policy || is_rt_policy(policy)) {
1202 struct sched_param params;
1203
1204 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1205
1206 sched_setscheduler_nocheck(task,
1207 policy | SCHED_RESET_ON_FORK,
1208 &params);
1209 }
1210 if (is_fair_policy(policy))
1211 set_user_nice(task, priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001212}
1213
Martijn Coenenecd972d2017-05-26 10:48:56 -07001214static void binder_set_priority(struct task_struct *task,
1215 struct binder_priority desired)
1216{
1217 binder_do_set_priority(task, desired, /* verify = */ true);
1218}
1219
1220static void binder_restore_priority(struct task_struct *task,
1221 struct binder_priority desired)
1222{
1223 binder_do_set_priority(task, desired, /* verify = */ false);
1224}
1225
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001226static void binder_transaction_priority(struct task_struct *task,
1227 struct binder_transaction *t,
Martijn Coenenc46810c2017-06-23 10:13:43 -07001228 struct binder_priority node_prio,
1229 bool inherit_rt)
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001230{
Ganesh Mahendran9add7c42017-09-27 15:12:25 +08001231 struct binder_priority desired_prio = t->priority;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001232
1233 if (t->set_priority_called)
1234 return;
1235
1236 t->set_priority_called = true;
1237 t->saved_priority.sched_policy = task->policy;
1238 t->saved_priority.prio = task->normal_prio;
1239
Martijn Coenenc46810c2017-06-23 10:13:43 -07001240 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1241 desired_prio.prio = NICE_TO_PRIO(0);
1242 desired_prio.sched_policy = SCHED_NORMAL;
Martijn Coenenc46810c2017-06-23 10:13:43 -07001243 }
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001244
1245 if (node_prio.prio < t->priority.prio ||
1246 (node_prio.prio == t->priority.prio &&
1247 node_prio.sched_policy == SCHED_FIFO)) {
1248 /*
1249 * In case the minimum priority on the node is
1250 * higher (lower value), use that priority. If
1251 * the priority is the same, but the node uses
1252 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1253 * run unbounded, unlike SCHED_RR.
1254 */
1255 desired_prio = node_prio;
1256 }
1257
1258 binder_set_priority(task, desired_prio);
1259}
1260
Todd Kjos425d23f2017-06-12 12:07:26 -07001261static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1262 binder_uintptr_t ptr)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001263{
1264 struct rb_node *n = proc->nodes.rb_node;
1265 struct binder_node *node;
1266
Martijn Coenened323352017-07-27 23:52:24 +02001267 assert_spin_locked(&proc->inner_lock);
Todd Kjos425d23f2017-06-12 12:07:26 -07001268
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001269 while (n) {
1270 node = rb_entry(n, struct binder_node, rb_node);
1271
1272 if (ptr < node->ptr)
1273 n = n->rb_left;
1274 else if (ptr > node->ptr)
1275 n = n->rb_right;
Todd Kjosf22abc72017-05-09 11:08:05 -07001276 else {
1277 /*
1278 * take an implicit weak reference
1279 * to ensure node stays alive until
1280 * call to binder_put_node()
1281 */
Todd Kjos425d23f2017-06-12 12:07:26 -07001282 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001283 return node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001284 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001285 }
1286 return NULL;
1287}
1288
Todd Kjos425d23f2017-06-12 12:07:26 -07001289static struct binder_node *binder_get_node(struct binder_proc *proc,
1290 binder_uintptr_t ptr)
1291{
1292 struct binder_node *node;
1293
1294 binder_inner_proc_lock(proc);
1295 node = binder_get_node_ilocked(proc, ptr);
1296 binder_inner_proc_unlock(proc);
1297 return node;
1298}
1299
1300static struct binder_node *binder_init_node_ilocked(
1301 struct binder_proc *proc,
1302 struct binder_node *new_node,
1303 struct flat_binder_object *fp)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001304{
1305 struct rb_node **p = &proc->nodes.rb_node;
1306 struct rb_node *parent = NULL;
1307 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001308 binder_uintptr_t ptr = fp ? fp->binder : 0;
1309 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1310 __u32 flags = fp ? fp->flags : 0;
Martijn Coenen6aac9792017-06-07 09:29:14 -07001311 s8 priority;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001312
Martijn Coenened323352017-07-27 23:52:24 +02001313 assert_spin_locked(&proc->inner_lock);
1314
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001315 while (*p) {
Todd Kjos425d23f2017-06-12 12:07:26 -07001316
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001317 parent = *p;
1318 node = rb_entry(parent, struct binder_node, rb_node);
1319
1320 if (ptr < node->ptr)
1321 p = &(*p)->rb_left;
1322 else if (ptr > node->ptr)
1323 p = &(*p)->rb_right;
Todd Kjos425d23f2017-06-12 12:07:26 -07001324 else {
1325 /*
1326 * A matching node is already in
1327 * the rb tree. Abandon the init
1328 * and return it.
1329 */
1330 binder_inc_node_tmpref_ilocked(node);
1331 return node;
1332 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001333 }
Todd Kjos425d23f2017-06-12 12:07:26 -07001334 node = new_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001335 binder_stats_created(BINDER_STAT_NODE);
Todd Kjosf22abc72017-05-09 11:08:05 -07001336 node->tmp_refs++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001337 rb_link_node(&node->rb_node, parent, p);
1338 rb_insert_color(&node->rb_node, &proc->nodes);
Todd Kjosc4bd08b2017-05-25 10:56:00 -07001339 node->debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001340 node->proc = proc;
1341 node->ptr = ptr;
1342 node->cookie = cookie;
1343 node->work.type = BINDER_WORK_NODE;
Martijn Coenen6aac9792017-06-07 09:29:14 -07001344 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
Ganesh Mahendran6cd26312017-09-26 17:56:25 +08001345 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
Martijn Coenen6aac9792017-06-07 09:29:14 -07001346 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1347 node->min_priority = to_kernel_prio(node->sched_policy, priority);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001348 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
Martijn Coenenc46810c2017-06-23 10:13:43 -07001349 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
Todd Kjosfc7a7e22017-05-29 16:44:24 -07001350 spin_lock_init(&node->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001351 INIT_LIST_HEAD(&node->work.entry);
1352 INIT_LIST_HEAD(&node->async_todo);
1353 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001354 "%d:%d node %d u%016llx c%016llx created\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001355 proc->pid, current->pid, node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001356 (u64)node->ptr, (u64)node->cookie);
Todd Kjos425d23f2017-06-12 12:07:26 -07001357
1358 return node;
1359}
1360
1361static struct binder_node *binder_new_node(struct binder_proc *proc,
1362 struct flat_binder_object *fp)
1363{
1364 struct binder_node *node;
1365 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1366
1367 if (!new_node)
1368 return NULL;
1369 binder_inner_proc_lock(proc);
1370 node = binder_init_node_ilocked(proc, new_node, fp);
1371 binder_inner_proc_unlock(proc);
1372 if (node != new_node)
1373 /*
1374 * The node was already added by another thread
1375 */
1376 kfree(new_node);
1377
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001378 return node;
1379}
1380
Todd Kjose7f23ed2017-03-21 13:06:01 -07001381static void binder_free_node(struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001382{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001383 kfree(node);
1384 binder_stats_deleted(BINDER_STAT_NODE);
1385}
1386
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001387static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1388 int internal,
1389 struct list_head *target_list)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001390{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001391 struct binder_proc *proc = node->proc;
1392
Martijn Coenened323352017-07-27 23:52:24 +02001393 assert_spin_locked(&node->lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001394 if (proc)
Martijn Coenened323352017-07-27 23:52:24 +02001395 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001396 if (strong) {
1397 if (internal) {
1398 if (target_list == NULL &&
1399 node->internal_strong_refs == 0 &&
Martijn Coenen0b3311e2016-09-30 15:51:48 +02001400 !(node->proc &&
1401 node == node->proc->context->
1402 binder_context_mgr_node &&
1403 node->has_strong_ref)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301404 pr_err("invalid inc strong node for %d\n",
1405 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001406 return -EINVAL;
1407 }
1408 node->internal_strong_refs++;
1409 } else
1410 node->local_strong_refs++;
1411 if (!node->has_strong_ref && target_list) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001412 binder_dequeue_work_ilocked(&node->work);
Martijn Coenen1af61802017-10-19 15:04:46 +02001413 /*
1414 * Note: this function is the only place where we queue
1415 * directly to a thread->todo without using the
1416 * corresponding binder_enqueue_thread_work() helper
1417 * functions; in this case it's ok to not set the
1418 * process_todo flag, since we know this node work will
1419 * always be followed by other work that starts queue
1420 * processing: in case of synchronous transactions, a
1421 * BR_REPLY or BR_ERROR; in case of oneway
1422 * transactions, a BR_TRANSACTION_COMPLETE.
1423 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001424 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001425 }
1426 } else {
1427 if (!internal)
1428 node->local_weak_refs++;
1429 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1430 if (target_list == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301431 pr_err("invalid inc weak node for %d\n",
1432 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001433 return -EINVAL;
1434 }
Martijn Coenen1af61802017-10-19 15:04:46 +02001435 /*
1436 * See comment above
1437 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001438 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001439 }
1440 }
1441 return 0;
1442}
1443
Todd Kjose7f23ed2017-03-21 13:06:01 -07001444static int binder_inc_node(struct binder_node *node, int strong, int internal,
1445 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001446{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001447 int ret;
1448
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001449 binder_node_inner_lock(node);
1450 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1451 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001452
1453 return ret;
1454}
1455
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001456static bool binder_dec_node_nilocked(struct binder_node *node,
1457 int strong, int internal)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001458{
1459 struct binder_proc *proc = node->proc;
1460
Martijn Coenened323352017-07-27 23:52:24 +02001461 assert_spin_locked(&node->lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001462 if (proc)
Martijn Coenened323352017-07-27 23:52:24 +02001463 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001464 if (strong) {
1465 if (internal)
1466 node->internal_strong_refs--;
1467 else
1468 node->local_strong_refs--;
1469 if (node->local_strong_refs || node->internal_strong_refs)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001470 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001471 } else {
1472 if (!internal)
1473 node->local_weak_refs--;
Todd Kjosf22abc72017-05-09 11:08:05 -07001474 if (node->local_weak_refs || node->tmp_refs ||
1475 !hlist_empty(&node->refs))
Todd Kjose7f23ed2017-03-21 13:06:01 -07001476 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001477 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001478
1479 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001480 if (list_empty(&node->work.entry)) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001481 binder_enqueue_work_ilocked(&node->work, &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07001482 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001483 }
1484 } else {
1485 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
Todd Kjosf22abc72017-05-09 11:08:05 -07001486 !node->local_weak_refs && !node->tmp_refs) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07001487 if (proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001488 binder_dequeue_work_ilocked(&node->work);
1489 rb_erase(&node->rb_node, &proc->nodes);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001490 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301491 "refless node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001492 node->debug_id);
1493 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001494 BUG_ON(!list_empty(&node->work.entry));
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001495 spin_lock(&binder_dead_nodes_lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001496 /*
1497 * tmp_refs could have changed so
1498 * check it again
1499 */
1500 if (node->tmp_refs) {
1501 spin_unlock(&binder_dead_nodes_lock);
1502 return false;
1503 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001504 hlist_del(&node->dead_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001505 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001506 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301507 "dead node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001508 node->debug_id);
1509 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001510 return true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001511 }
1512 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001513 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001514}
1515
Todd Kjose7f23ed2017-03-21 13:06:01 -07001516static void binder_dec_node(struct binder_node *node, int strong, int internal)
1517{
1518 bool free_node;
1519
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001520 binder_node_inner_lock(node);
1521 free_node = binder_dec_node_nilocked(node, strong, internal);
1522 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001523 if (free_node)
1524 binder_free_node(node);
1525}
1526
1527static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
Todd Kjosf22abc72017-05-09 11:08:05 -07001528{
1529 /*
1530 * No call to binder_inc_node() is needed since we
1531 * don't need to inform userspace of any changes to
1532 * tmp_refs
1533 */
1534 node->tmp_refs++;
1535}
1536
1537/**
Todd Kjose7f23ed2017-03-21 13:06:01 -07001538 * binder_inc_node_tmpref() - take a temporary reference on node
1539 * @node: node to reference
1540 *
1541 * Take reference on node to prevent the node from being freed
1542 * while referenced only by a local variable. The inner lock is
1543 * needed to serialize with the node work on the queue (which
1544 * isn't needed after the node is dead). If the node is dead
1545 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1546 * node->tmp_refs against dead-node-only cases where the node
1547 * lock cannot be acquired (eg traversing the dead node list to
1548 * print nodes)
1549 */
1550static void binder_inc_node_tmpref(struct binder_node *node)
1551{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001552 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001553 if (node->proc)
1554 binder_inner_proc_lock(node->proc);
1555 else
1556 spin_lock(&binder_dead_nodes_lock);
1557 binder_inc_node_tmpref_ilocked(node);
1558 if (node->proc)
1559 binder_inner_proc_unlock(node->proc);
1560 else
1561 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001562 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001563}
1564
1565/**
Todd Kjosf22abc72017-05-09 11:08:05 -07001566 * binder_dec_node_tmpref() - remove a temporary reference on node
1567 * @node: node to reference
1568 *
1569 * Release temporary reference on node taken via binder_inc_node_tmpref()
1570 */
1571static void binder_dec_node_tmpref(struct binder_node *node)
1572{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001573 bool free_node;
1574
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001575 binder_node_inner_lock(node);
1576 if (!node->proc)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001577 spin_lock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001578 node->tmp_refs--;
1579 BUG_ON(node->tmp_refs < 0);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001580 if (!node->proc)
1581 spin_unlock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001582 /*
1583 * Call binder_dec_node() to check if all refcounts are 0
1584 * and cleanup is needed. Calling with strong=0 and internal=1
1585 * causes no actual reference to be released in binder_dec_node().
1586 * If that changes, a change is needed here too.
1587 */
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001588 free_node = binder_dec_node_nilocked(node, 0, 1);
1589 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001590 if (free_node)
1591 binder_free_node(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07001592}
1593
1594static void binder_put_node(struct binder_node *node)
1595{
1596 binder_dec_node_tmpref(node);
1597}
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001598
Todd Kjos5346bf32016-10-20 16:43:34 -07001599static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1600 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001601{
1602 struct rb_node *n = proc->refs_by_desc.rb_node;
1603 struct binder_ref *ref;
1604
1605 while (n) {
1606 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1607
Todd Kjosb0117bb2017-05-08 09:16:27 -07001608 if (desc < ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001609 n = n->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001610 } else if (desc > ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001611 n = n->rb_right;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001612 } else if (need_strong_ref && !ref->data.strong) {
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001613 binder_user_error("tried to use weak ref as strong ref\n");
1614 return NULL;
1615 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001616 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001617 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001618 }
1619 return NULL;
1620}
1621
Todd Kjosb0117bb2017-05-08 09:16:27 -07001622/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001623 * binder_get_ref_for_node_olocked() - get the ref associated with given node
Todd Kjosb0117bb2017-05-08 09:16:27 -07001624 * @proc: binder_proc that owns the ref
1625 * @node: binder_node of target
1626 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1627 *
1628 * Look up the ref for the given node and return it if it exists
1629 *
1630 * If it doesn't exist and the caller provides a newly allocated
1631 * ref, initialize the fields of the newly allocated ref and insert
1632 * into the given proc rb_trees and node refs list.
1633 *
1634 * Return: the ref for node. It is possible that another thread
1635 * allocated/initialized the ref first in which case the
1636 * returned ref would be different than the passed-in
1637 * new_ref. new_ref must be kfree'd by the caller in
1638 * this case.
1639 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001640static struct binder_ref *binder_get_ref_for_node_olocked(
1641 struct binder_proc *proc,
1642 struct binder_node *node,
1643 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001644{
Todd Kjosb0117bb2017-05-08 09:16:27 -07001645 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001646 struct rb_node **p = &proc->refs_by_node.rb_node;
1647 struct rb_node *parent = NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001648 struct binder_ref *ref;
1649 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001650
1651 while (*p) {
1652 parent = *p;
1653 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1654
1655 if (node < ref->node)
1656 p = &(*p)->rb_left;
1657 else if (node > ref->node)
1658 p = &(*p)->rb_right;
1659 else
1660 return ref;
1661 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001662 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001663 return NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001664
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001665 binder_stats_created(BINDER_STAT_REF);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001666 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001667 new_ref->proc = proc;
1668 new_ref->node = node;
1669 rb_link_node(&new_ref->rb_node_node, parent, p);
1670 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1671
Todd Kjosb0117bb2017-05-08 09:16:27 -07001672 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001673 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1674 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001675 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001676 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001677 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001678 }
1679
1680 p = &proc->refs_by_desc.rb_node;
1681 while (*p) {
1682 parent = *p;
1683 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1684
Todd Kjosb0117bb2017-05-08 09:16:27 -07001685 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001686 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001687 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001688 p = &(*p)->rb_right;
1689 else
1690 BUG();
1691 }
1692 rb_link_node(&new_ref->rb_node_desc, parent, p);
1693 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001694
1695 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001696 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001697
Todd Kjos4cbe5752017-05-01 17:21:51 -07001698 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1699 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001700 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001701 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001702 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001703 return new_ref;
1704}
1705
Todd Kjos5346bf32016-10-20 16:43:34 -07001706static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001707{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001708 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001709
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001710 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301711 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001712 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301713 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001714
1715 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1716 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001717
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001718 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001719 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001720 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001721
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001722 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001723 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1724 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001725 /*
1726 * Clear ref->node unless we want the caller to free the node
1727 */
1728 if (!delete_node) {
1729 /*
1730 * The caller uses ref->node to determine
1731 * whether the node needs to be freed. Clear
1732 * it since the node is still alive.
1733 */
1734 ref->node = NULL;
1735 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001736
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001737 if (ref->death) {
1738 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301739 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001740 ref->proc->pid, ref->data.debug_id,
1741 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001742 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001743 binder_stats_deleted(BINDER_STAT_DEATH);
1744 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001745 binder_stats_deleted(BINDER_STAT_REF);
1746}
1747
Todd Kjosb0117bb2017-05-08 09:16:27 -07001748/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001749 * binder_inc_ref_olocked() - increment the ref for given handle
Todd Kjosb0117bb2017-05-08 09:16:27 -07001750 * @ref: ref to be incremented
1751 * @strong: if true, strong increment, else weak
1752 * @target_list: list to queue node work on
1753 *
Todd Kjos5346bf32016-10-20 16:43:34 -07001754 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjosb0117bb2017-05-08 09:16:27 -07001755 *
1756 * Return: 0, if successful, else errno
1757 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001758static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1759 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001760{
1761 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001762
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001763 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001764 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001765 ret = binder_inc_node(ref->node, 1, 1, target_list);
1766 if (ret)
1767 return ret;
1768 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001769 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001770 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001771 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001772 ret = binder_inc_node(ref->node, 0, 1, target_list);
1773 if (ret)
1774 return ret;
1775 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001776 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001777 }
1778 return 0;
1779}
1780
Todd Kjosb0117bb2017-05-08 09:16:27 -07001781/**
1782 * binder_dec_ref() - dec the ref for given handle
1783 * @ref: ref to be decremented
1784 * @strong: if true, strong decrement, else weak
1785 *
1786 * Decrement the ref.
1787 *
Todd Kjosb0117bb2017-05-08 09:16:27 -07001788 * Return: true if ref is cleaned up and ready to be freed
1789 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001790static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001791{
1792 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001793 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301794 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001795 ref->proc->pid, ref->data.debug_id,
1796 ref->data.desc, ref->data.strong,
1797 ref->data.weak);
1798 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001799 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001800 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001801 if (ref->data.strong == 0)
1802 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001803 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001804 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301805 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001806 ref->proc->pid, ref->data.debug_id,
1807 ref->data.desc, ref->data.strong,
1808 ref->data.weak);
1809 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001810 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001811 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001812 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001813 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001814 binder_cleanup_ref_olocked(ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001815 return true;
1816 }
1817 return false;
1818}
1819
1820/**
1821 * binder_get_node_from_ref() - get the node from the given proc/desc
1822 * @proc: proc containing the ref
1823 * @desc: the handle associated with the ref
1824 * @need_strong_ref: if true, only return node if ref is strong
1825 * @rdata: the id/refcount data for the ref
1826 *
1827 * Given a proc and ref handle, return the associated binder_node
1828 *
1829 * Return: a binder_node or NULL if not found or not strong when strong required
1830 */
1831static struct binder_node *binder_get_node_from_ref(
1832 struct binder_proc *proc,
1833 u32 desc, bool need_strong_ref,
1834 struct binder_ref_data *rdata)
1835{
1836 struct binder_node *node;
1837 struct binder_ref *ref;
1838
Todd Kjos5346bf32016-10-20 16:43:34 -07001839 binder_proc_lock(proc);
1840 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001841 if (!ref)
1842 goto err_no_ref;
1843 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001844 /*
1845 * Take an implicit reference on the node to ensure
1846 * it stays alive until the call to binder_put_node()
1847 */
1848 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001849 if (rdata)
1850 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001851 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001852
1853 return node;
1854
1855err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001856 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001857 return NULL;
1858}
1859
1860/**
1861 * binder_free_ref() - free the binder_ref
1862 * @ref: ref to free
1863 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001864 * Free the binder_ref. Free the binder_node indicated by ref->node
1865 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001866 */
1867static void binder_free_ref(struct binder_ref *ref)
1868{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001869 if (ref->node)
1870 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001871 kfree(ref->death);
1872 kfree(ref);
1873}
1874
1875/**
1876 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1877 * @proc: proc containing the ref
1878 * @desc: the handle associated with the ref
1879 * @increment: true=inc reference, false=dec reference
1880 * @strong: true=strong reference, false=weak reference
1881 * @rdata: the id/refcount data for the ref
1882 *
1883 * Given a proc and ref handle, increment or decrement the ref
1884 * according to "increment" arg.
1885 *
1886 * Return: 0 if successful, else errno
1887 */
1888static int binder_update_ref_for_handle(struct binder_proc *proc,
1889 uint32_t desc, bool increment, bool strong,
1890 struct binder_ref_data *rdata)
1891{
1892 int ret = 0;
1893 struct binder_ref *ref;
1894 bool delete_ref = false;
1895
Todd Kjos5346bf32016-10-20 16:43:34 -07001896 binder_proc_lock(proc);
1897 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001898 if (!ref) {
1899 ret = -EINVAL;
1900 goto err_no_ref;
1901 }
1902 if (increment)
Todd Kjos5346bf32016-10-20 16:43:34 -07001903 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001904 else
Todd Kjos5346bf32016-10-20 16:43:34 -07001905 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001906
1907 if (rdata)
1908 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001909 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001910
1911 if (delete_ref)
1912 binder_free_ref(ref);
1913 return ret;
1914
1915err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001916 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001917 return ret;
1918}
1919
1920/**
1921 * binder_dec_ref_for_handle() - dec the ref for given handle
1922 * @proc: proc containing the ref
1923 * @desc: the handle associated with the ref
1924 * @strong: true=strong reference, false=weak reference
1925 * @rdata: the id/refcount data for the ref
1926 *
1927 * Just calls binder_update_ref_for_handle() to decrement the ref.
1928 *
1929 * Return: 0 if successful, else errno
1930 */
1931static int binder_dec_ref_for_handle(struct binder_proc *proc,
1932 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1933{
1934 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1935}
1936
1937
1938/**
1939 * binder_inc_ref_for_node() - increment the ref for given proc/node
1940 * @proc: proc containing the ref
1941 * @node: target node
1942 * @strong: true=strong reference, false=weak reference
1943 * @target_list: worklist to use if node is incremented
1944 * @rdata: the id/refcount data for the ref
1945 *
1946 * Given a proc and node, increment the ref. Create the ref if it
1947 * doesn't already exist
1948 *
1949 * Return: 0 if successful, else errno
1950 */
1951static int binder_inc_ref_for_node(struct binder_proc *proc,
1952 struct binder_node *node,
1953 bool strong,
1954 struct list_head *target_list,
1955 struct binder_ref_data *rdata)
1956{
1957 struct binder_ref *ref;
1958 struct binder_ref *new_ref = NULL;
1959 int ret = 0;
1960
Todd Kjos5346bf32016-10-20 16:43:34 -07001961 binder_proc_lock(proc);
1962 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001963 if (!ref) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001964 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001965 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1966 if (!new_ref)
1967 return -ENOMEM;
Todd Kjos5346bf32016-10-20 16:43:34 -07001968 binder_proc_lock(proc);
1969 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001970 }
Todd Kjos5346bf32016-10-20 16:43:34 -07001971 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001972 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001973 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001974 if (new_ref && ref != new_ref)
1975 /*
1976 * Another thread created the ref first so
1977 * free the one we allocated
1978 */
1979 kfree(new_ref);
1980 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001981}
1982
Martijn Coenen995a36e2017-06-02 13:36:52 -07001983static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1984 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001985{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001986 BUG_ON(!target_thread);
Martijn Coenened323352017-07-27 23:52:24 +02001987 assert_spin_locked(&target_thread->proc->inner_lock);
Todd Kjos21ef40a2017-03-30 18:02:13 -07001988 BUG_ON(target_thread->transaction_stack != t);
1989 BUG_ON(target_thread->transaction_stack->from != target_thread);
1990 target_thread->transaction_stack =
1991 target_thread->transaction_stack->from_parent;
1992 t->from = NULL;
1993}
1994
Todd Kjos2f993e22017-05-12 14:42:55 -07001995/**
1996 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1997 * @thread: thread to decrement
1998 *
1999 * A thread needs to be kept alive while being used to create or
2000 * handle a transaction. binder_get_txn_from() is used to safely
2001 * extract t->from from a binder_transaction and keep the thread
2002 * indicated by t->from from being freed. When done with that
2003 * binder_thread, this function is called to decrement the
2004 * tmp_ref and free if appropriate (thread has been released
2005 * and no transaction being processed by the driver)
2006 */
2007static void binder_thread_dec_tmpref(struct binder_thread *thread)
2008{
2009 /*
2010 * atomic is used to protect the counter value while
2011 * it cannot reach zero or thread->is_dead is false
Todd Kjos2f993e22017-05-12 14:42:55 -07002012 */
Todd Kjosb4827902017-05-25 15:52:17 -07002013 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002014 atomic_dec(&thread->tmp_ref);
2015 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07002016 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002017 binder_free_thread(thread);
2018 return;
2019 }
Todd Kjosb4827902017-05-25 15:52:17 -07002020 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002021}
2022
2023/**
2024 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2025 * @proc: proc to decrement
2026 *
2027 * A binder_proc needs to be kept alive while being used to create or
2028 * handle a transaction. proc->tmp_ref is incremented when
2029 * creating a new transaction or the binder_proc is currently in-use
2030 * by threads that are being released. When done with the binder_proc,
2031 * this function is called to decrement the counter and free the
2032 * proc if appropriate (proc has been released, all threads have
2033 * been released and not currenly in-use to process a transaction).
2034 */
2035static void binder_proc_dec_tmpref(struct binder_proc *proc)
2036{
Todd Kjosb4827902017-05-25 15:52:17 -07002037 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002038 proc->tmp_ref--;
2039 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2040 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07002041 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002042 binder_free_proc(proc);
2043 return;
2044 }
Todd Kjosb4827902017-05-25 15:52:17 -07002045 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002046}
2047
2048/**
2049 * binder_get_txn_from() - safely extract the "from" thread in transaction
2050 * @t: binder transaction for t->from
2051 *
2052 * Atomically return the "from" thread and increment the tmp_ref
2053 * count for the thread to ensure it stays alive until
2054 * binder_thread_dec_tmpref() is called.
2055 *
2056 * Return: the value of t->from
2057 */
2058static struct binder_thread *binder_get_txn_from(
2059 struct binder_transaction *t)
2060{
2061 struct binder_thread *from;
2062
2063 spin_lock(&t->lock);
2064 from = t->from;
2065 if (from)
2066 atomic_inc(&from->tmp_ref);
2067 spin_unlock(&t->lock);
2068 return from;
2069}
2070
Martijn Coenen995a36e2017-06-02 13:36:52 -07002071/**
2072 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2073 * @t: binder transaction for t->from
2074 *
2075 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2076 * to guarantee that the thread cannot be released while operating on it.
2077 * The caller must call binder_inner_proc_unlock() to release the inner lock
2078 * as well as call binder_dec_thread_txn() to release the reference.
2079 *
2080 * Return: the value of t->from
2081 */
2082static struct binder_thread *binder_get_txn_from_and_acq_inner(
2083 struct binder_transaction *t)
2084{
2085 struct binder_thread *from;
2086
2087 from = binder_get_txn_from(t);
2088 if (!from)
2089 return NULL;
2090 binder_inner_proc_lock(from->proc);
2091 if (t->from) {
2092 BUG_ON(from != t->from);
2093 return from;
2094 }
2095 binder_inner_proc_unlock(from->proc);
2096 binder_thread_dec_tmpref(from);
2097 return NULL;
2098}
2099
Todd Kjos21ef40a2017-03-30 18:02:13 -07002100static void binder_free_transaction(struct binder_transaction *t)
2101{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002102 if (t->buffer)
2103 t->buffer->transaction = NULL;
2104 kfree(t);
2105 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2106}
2107
2108static void binder_send_failed_reply(struct binder_transaction *t,
2109 uint32_t error_code)
2110{
2111 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002112 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09002113
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002114 BUG_ON(t->flags & TF_ONE_WAY);
2115 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002116 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002117 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002118 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2119 "send failed reply for transaction %d to %d:%d\n",
2120 t->debug_id,
2121 target_thread->proc->pid,
2122 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002123
Martijn Coenen995a36e2017-06-02 13:36:52 -07002124 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07002125 if (target_thread->reply_error.cmd == BR_OK) {
2126 target_thread->reply_error.cmd = error_code;
Martijn Coenen1af61802017-10-19 15:04:46 +02002127 binder_enqueue_thread_work_ilocked(
2128 target_thread,
2129 &target_thread->reply_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002130 wake_up_interruptible(&target_thread->wait);
2131 } else {
Todd Kjosd3a2afb2018-02-07 12:38:47 -08002132 /*
2133 * Cannot get here for normal operation, but
2134 * we can if multiple synchronous transactions
2135 * are sent without blocking for responses.
2136 * Just ignore the 2nd error in this case.
2137 */
2138 pr_warn("Unexpected reply error: %u\n",
2139 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002140 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002141 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002142 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07002143 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002144 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002145 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002146 next = t->from_parent;
2147
2148 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2149 "send failed reply for transaction %d, target dead\n",
2150 t->debug_id);
2151
Todd Kjos21ef40a2017-03-30 18:02:13 -07002152 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002153 if (next == NULL) {
2154 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2155 "reply failed, no target thread at root\n");
2156 return;
2157 }
2158 t = next;
2159 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2160 "reply failed, no target thread -- retry %d\n",
2161 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002162 }
2163}
2164
Martijn Coenen00c80372016-07-13 12:06:49 +02002165/**
Martijn Coenen3217ccc2017-08-24 15:23:36 +02002166 * binder_cleanup_transaction() - cleans up undelivered transaction
2167 * @t: transaction that needs to be cleaned up
2168 * @reason: reason the transaction wasn't delivered
2169 * @error_code: error to return to caller (if synchronous call)
2170 */
2171static void binder_cleanup_transaction(struct binder_transaction *t,
2172 const char *reason,
2173 uint32_t error_code)
2174{
2175 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2176 binder_send_failed_reply(t, error_code);
2177 } else {
2178 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2179 "undelivered transaction %d, %s\n",
2180 t->debug_id, reason);
2181 binder_free_transaction(t);
2182 }
2183}
2184
2185/**
Martijn Coenen00c80372016-07-13 12:06:49 +02002186 * binder_validate_object() - checks for a valid metadata object in a buffer.
2187 * @buffer: binder_buffer that we're parsing.
2188 * @offset: offset in the buffer at which to validate an object.
2189 *
2190 * Return: If there's a valid metadata object at @offset in @buffer, the
2191 * size of that object. Otherwise, it returns zero.
2192 */
2193static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2194{
2195 /* Check if we can read a header first */
2196 struct binder_object_header *hdr;
2197 size_t object_size = 0;
2198
Dan Carpentera1996892018-03-29 12:14:40 +03002199 if (buffer->data_size < sizeof(*hdr) ||
2200 offset > buffer->data_size - sizeof(*hdr) ||
Martijn Coenen00c80372016-07-13 12:06:49 +02002201 !IS_ALIGNED(offset, sizeof(u32)))
2202 return 0;
2203
2204 /* Ok, now see if we can read a complete object. */
2205 hdr = (struct binder_object_header *)(buffer->data + offset);
2206 switch (hdr->type) {
2207 case BINDER_TYPE_BINDER:
2208 case BINDER_TYPE_WEAK_BINDER:
2209 case BINDER_TYPE_HANDLE:
2210 case BINDER_TYPE_WEAK_HANDLE:
2211 object_size = sizeof(struct flat_binder_object);
2212 break;
2213 case BINDER_TYPE_FD:
2214 object_size = sizeof(struct binder_fd_object);
2215 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002216 case BINDER_TYPE_PTR:
2217 object_size = sizeof(struct binder_buffer_object);
2218 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002219 case BINDER_TYPE_FDA:
2220 object_size = sizeof(struct binder_fd_array_object);
2221 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02002222 default:
2223 return 0;
2224 }
2225 if (offset <= buffer->data_size - object_size &&
2226 buffer->data_size >= object_size)
2227 return object_size;
2228 else
2229 return 0;
2230}
2231
Martijn Coenen5a6da532016-09-30 14:10:07 +02002232/**
2233 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2234 * @b: binder_buffer containing the object
2235 * @index: index in offset array at which the binder_buffer_object is
2236 * located
2237 * @start: points to the start of the offset array
2238 * @num_valid: the number of valid offsets in the offset array
2239 *
2240 * Return: If @index is within the valid range of the offset array
2241 * described by @start and @num_valid, and if there's a valid
2242 * binder_buffer_object at the offset found in index @index
2243 * of the offset array, that object is returned. Otherwise,
2244 * %NULL is returned.
2245 * Note that the offset found in index @index itself is not
2246 * verified; this function assumes that @num_valid elements
2247 * from @start were previously verified to have valid offsets.
2248 */
2249static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2250 binder_size_t index,
2251 binder_size_t *start,
2252 binder_size_t num_valid)
2253{
2254 struct binder_buffer_object *buffer_obj;
2255 binder_size_t *offp;
2256
2257 if (index >= num_valid)
2258 return NULL;
2259
2260 offp = start + index;
2261 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2262 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2263 return NULL;
2264
2265 return buffer_obj;
2266}
2267
2268/**
2269 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2270 * @b: transaction buffer
2271 * @objects_start start of objects buffer
2272 * @buffer: binder_buffer_object in which to fix up
2273 * @offset: start offset in @buffer to fix up
2274 * @last_obj: last binder_buffer_object that we fixed up in
2275 * @last_min_offset: minimum fixup offset in @last_obj
2276 *
2277 * Return: %true if a fixup in buffer @buffer at offset @offset is
2278 * allowed.
2279 *
2280 * For safety reasons, we only allow fixups inside a buffer to happen
2281 * at increasing offsets; additionally, we only allow fixup on the last
2282 * buffer object that was verified, or one of its parents.
2283 *
2284 * Example of what is allowed:
2285 *
2286 * A
2287 * B (parent = A, offset = 0)
2288 * C (parent = A, offset = 16)
2289 * D (parent = C, offset = 0)
2290 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2291 *
2292 * Examples of what is not allowed:
2293 *
2294 * Decreasing offsets within the same parent:
2295 * A
2296 * C (parent = A, offset = 16)
2297 * B (parent = A, offset = 0) // decreasing offset within A
2298 *
2299 * Referring to a parent that wasn't the last object or any of its parents:
2300 * A
2301 * B (parent = A, offset = 0)
2302 * C (parent = A, offset = 0)
2303 * C (parent = A, offset = 16)
2304 * D (parent = B, offset = 0) // B is not A or any of A's parents
2305 */
2306static bool binder_validate_fixup(struct binder_buffer *b,
2307 binder_size_t *objects_start,
2308 struct binder_buffer_object *buffer,
2309 binder_size_t fixup_offset,
2310 struct binder_buffer_object *last_obj,
2311 binder_size_t last_min_offset)
2312{
2313 if (!last_obj) {
2314 /* Nothing to fix up in */
2315 return false;
2316 }
2317
2318 while (last_obj != buffer) {
2319 /*
2320 * Safe to retrieve the parent of last_obj, since it
2321 * was already previously verified by the driver.
2322 */
2323 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2324 return false;
2325 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2326 last_obj = (struct binder_buffer_object *)
2327 (b->data + *(objects_start + last_obj->parent));
2328 }
2329 return (fixup_offset >= last_min_offset);
2330}
2331
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002332static void binder_transaction_buffer_release(struct binder_proc *proc,
2333 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002334 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002335{
Martijn Coenen5a6da532016-09-30 14:10:07 +02002336 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002337 int debug_id = buffer->debug_id;
2338
2339 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302340 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002341 proc->pid, buffer->debug_id,
2342 buffer->data_size, buffer->offsets_size, failed_at);
2343
2344 if (buffer->target_node)
2345 binder_dec_node(buffer->target_node, 1, 0);
2346
Martijn Coenen5a6da532016-09-30 14:10:07 +02002347 off_start = (binder_size_t *)(buffer->data +
2348 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002349 if (failed_at)
2350 off_end = failed_at;
2351 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02002352 off_end = (void *)off_start + buffer->offsets_size;
2353 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002354 struct binder_object_header *hdr;
2355 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002356
Martijn Coenen00c80372016-07-13 12:06:49 +02002357 if (object_size == 0) {
2358 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002359 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002360 continue;
2361 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002362 hdr = (struct binder_object_header *)(buffer->data + *offp);
2363 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002364 case BINDER_TYPE_BINDER:
2365 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002366 struct flat_binder_object *fp;
2367 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002368
Martijn Coenen00c80372016-07-13 12:06:49 +02002369 fp = to_flat_binder_object(hdr);
2370 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002371 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002372 pr_err("transaction release %d bad node %016llx\n",
2373 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002374 break;
2375 }
2376 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002377 " node %d u%016llx\n",
2378 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002379 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2380 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002381 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002382 } break;
2383 case BINDER_TYPE_HANDLE:
2384 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002385 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002386 struct binder_ref_data rdata;
2387 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002388
Martijn Coenen00c80372016-07-13 12:06:49 +02002389 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002390 ret = binder_dec_ref_for_handle(proc, fp->handle,
2391 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2392
2393 if (ret) {
2394 pr_err("transaction release %d bad handle %d, ret = %d\n",
2395 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002396 break;
2397 }
2398 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002399 " ref %d desc %d\n",
2400 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002401 } break;
2402
Martijn Coenen00c80372016-07-13 12:06:49 +02002403 case BINDER_TYPE_FD: {
2404 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2405
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002406 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002407 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002408 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002409 task_close_fd(proc, fp->fd);
2410 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002411 case BINDER_TYPE_PTR:
2412 /*
2413 * Nothing to do here, this will get cleaned up when the
2414 * transaction buffer gets freed
2415 */
2416 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002417 case BINDER_TYPE_FDA: {
2418 struct binder_fd_array_object *fda;
2419 struct binder_buffer_object *parent;
2420 uintptr_t parent_buffer;
2421 u32 *fd_array;
2422 size_t fd_index;
2423 binder_size_t fd_buf_size;
2424
2425 fda = to_binder_fd_array_object(hdr);
2426 parent = binder_validate_ptr(buffer, fda->parent,
2427 off_start,
2428 offp - off_start);
2429 if (!parent) {
2430 pr_err("transaction release %d bad parent offset",
2431 debug_id);
2432 continue;
2433 }
2434 /*
2435 * Since the parent was already fixed up, convert it
2436 * back to kernel address space to access it
2437 */
2438 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002439 binder_alloc_get_user_buffer_offset(
2440 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002441
2442 fd_buf_size = sizeof(u32) * fda->num_fds;
2443 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2444 pr_err("transaction release %d invalid number of fds (%lld)\n",
2445 debug_id, (u64)fda->num_fds);
2446 continue;
2447 }
2448 if (fd_buf_size > parent->length ||
2449 fda->parent_offset > parent->length - fd_buf_size) {
2450 /* No space for all file descriptors here. */
2451 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2452 debug_id, (u64)fda->num_fds);
2453 continue;
2454 }
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002455 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002456 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2457 task_close_fd(proc, fd_array[fd_index]);
2458 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002459 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002460 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002461 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002462 break;
2463 }
2464 }
2465}
2466
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002467static int binder_translate_binder(struct flat_binder_object *fp,
2468 struct binder_transaction *t,
2469 struct binder_thread *thread)
2470{
2471 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002472 struct binder_proc *proc = thread->proc;
2473 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002474 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002475 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002476
2477 node = binder_get_node(proc, fp->binder);
2478 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002479 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002480 if (!node)
2481 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002482 }
2483 if (fp->cookie != node->cookie) {
2484 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2485 proc->pid, thread->pid, (u64)fp->binder,
2486 node->debug_id, (u64)fp->cookie,
2487 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002488 ret = -EINVAL;
2489 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002490 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002491 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2492 ret = -EPERM;
2493 goto done;
2494 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002495
Todd Kjosb0117bb2017-05-08 09:16:27 -07002496 ret = binder_inc_ref_for_node(target_proc, node,
2497 fp->hdr.type == BINDER_TYPE_BINDER,
2498 &thread->todo, &rdata);
2499 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002500 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002501
2502 if (fp->hdr.type == BINDER_TYPE_BINDER)
2503 fp->hdr.type = BINDER_TYPE_HANDLE;
2504 else
2505 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2506 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002507 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002508 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002509
Todd Kjosb0117bb2017-05-08 09:16:27 -07002510 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002511 binder_debug(BINDER_DEBUG_TRANSACTION,
2512 " node %d u%016llx -> ref %d desc %d\n",
2513 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002514 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002515done:
2516 binder_put_node(node);
2517 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002518}
2519
2520static int binder_translate_handle(struct flat_binder_object *fp,
2521 struct binder_transaction *t,
2522 struct binder_thread *thread)
2523{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002524 struct binder_proc *proc = thread->proc;
2525 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002526 struct binder_node *node;
2527 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002528 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002529
Todd Kjosb0117bb2017-05-08 09:16:27 -07002530 node = binder_get_node_from_ref(proc, fp->handle,
2531 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2532 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002533 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2534 proc->pid, thread->pid, fp->handle);
2535 return -EINVAL;
2536 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002537 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2538 ret = -EPERM;
2539 goto done;
2540 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002541
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002542 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002543 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002544 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2545 fp->hdr.type = BINDER_TYPE_BINDER;
2546 else
2547 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002548 fp->binder = node->ptr;
2549 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002550 if (node->proc)
2551 binder_inner_proc_lock(node->proc);
2552 binder_inc_node_nilocked(node,
2553 fp->hdr.type == BINDER_TYPE_BINDER,
2554 0, NULL);
2555 if (node->proc)
2556 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002557 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002558 binder_debug(BINDER_DEBUG_TRANSACTION,
2559 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002560 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2561 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002562 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002563 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002564 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002565
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002566 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002567 ret = binder_inc_ref_for_node(target_proc, node,
2568 fp->hdr.type == BINDER_TYPE_HANDLE,
2569 NULL, &dest_rdata);
2570 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002571 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002572
2573 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002574 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002575 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002576 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2577 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002578 binder_debug(BINDER_DEBUG_TRANSACTION,
2579 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002580 src_rdata.debug_id, src_rdata.desc,
2581 dest_rdata.debug_id, dest_rdata.desc,
2582 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002583 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002584done:
2585 binder_put_node(node);
2586 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002587}
2588
2589static int binder_translate_fd(int fd,
2590 struct binder_transaction *t,
2591 struct binder_thread *thread,
2592 struct binder_transaction *in_reply_to)
2593{
2594 struct binder_proc *proc = thread->proc;
2595 struct binder_proc *target_proc = t->to_proc;
2596 int target_fd;
2597 struct file *file;
2598 int ret;
2599 bool target_allows_fd;
2600
2601 if (in_reply_to)
2602 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2603 else
2604 target_allows_fd = t->buffer->target_node->accept_fds;
2605 if (!target_allows_fd) {
2606 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2607 proc->pid, thread->pid,
2608 in_reply_to ? "reply" : "transaction",
2609 fd);
2610 ret = -EPERM;
2611 goto err_fd_not_accepted;
2612 }
2613
2614 file = fget(fd);
2615 if (!file) {
2616 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2617 proc->pid, thread->pid, fd);
2618 ret = -EBADF;
2619 goto err_fget;
2620 }
2621 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2622 if (ret < 0) {
2623 ret = -EPERM;
2624 goto err_security;
2625 }
2626
2627 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2628 if (target_fd < 0) {
2629 ret = -ENOMEM;
2630 goto err_get_unused_fd;
2631 }
2632 task_fd_install(target_proc, target_fd, file);
2633 trace_binder_transaction_fd(t, fd, target_fd);
2634 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2635 fd, target_fd);
2636
2637 return target_fd;
2638
2639err_get_unused_fd:
2640err_security:
2641 fput(file);
2642err_fget:
2643err_fd_not_accepted:
2644 return ret;
2645}
2646
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002647static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2648 struct binder_buffer_object *parent,
2649 struct binder_transaction *t,
2650 struct binder_thread *thread,
2651 struct binder_transaction *in_reply_to)
2652{
2653 binder_size_t fdi, fd_buf_size, num_installed_fds;
2654 int target_fd;
2655 uintptr_t parent_buffer;
2656 u32 *fd_array;
2657 struct binder_proc *proc = thread->proc;
2658 struct binder_proc *target_proc = t->to_proc;
2659
2660 fd_buf_size = sizeof(u32) * fda->num_fds;
2661 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2662 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2663 proc->pid, thread->pid, (u64)fda->num_fds);
2664 return -EINVAL;
2665 }
2666 if (fd_buf_size > parent->length ||
2667 fda->parent_offset > parent->length - fd_buf_size) {
2668 /* No space for all file descriptors here. */
2669 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2670 proc->pid, thread->pid, (u64)fda->num_fds);
2671 return -EINVAL;
2672 }
2673 /*
2674 * Since the parent was already fixed up, convert it
2675 * back to the kernel address space to access it
2676 */
Todd Kjosd325d372016-10-10 10:40:53 -07002677 parent_buffer = parent->buffer -
2678 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002679 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002680 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2681 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2682 proc->pid, thread->pid);
2683 return -EINVAL;
2684 }
2685 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2686 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2687 in_reply_to);
2688 if (target_fd < 0)
2689 goto err_translate_fd_failed;
2690 fd_array[fdi] = target_fd;
2691 }
2692 return 0;
2693
2694err_translate_fd_failed:
2695 /*
2696 * Failed to allocate fd or security error, free fds
2697 * installed so far.
2698 */
2699 num_installed_fds = fdi;
2700 for (fdi = 0; fdi < num_installed_fds; fdi++)
2701 task_close_fd(target_proc, fd_array[fdi]);
2702 return target_fd;
2703}
2704
Martijn Coenen5a6da532016-09-30 14:10:07 +02002705static int binder_fixup_parent(struct binder_transaction *t,
2706 struct binder_thread *thread,
2707 struct binder_buffer_object *bp,
2708 binder_size_t *off_start,
2709 binder_size_t num_valid,
2710 struct binder_buffer_object *last_fixup_obj,
2711 binder_size_t last_fixup_min_off)
2712{
2713 struct binder_buffer_object *parent;
2714 u8 *parent_buffer;
2715 struct binder_buffer *b = t->buffer;
2716 struct binder_proc *proc = thread->proc;
2717 struct binder_proc *target_proc = t->to_proc;
2718
2719 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2720 return 0;
2721
2722 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2723 if (!parent) {
2724 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2725 proc->pid, thread->pid);
2726 return -EINVAL;
2727 }
2728
2729 if (!binder_validate_fixup(b, off_start,
2730 parent, bp->parent_offset,
2731 last_fixup_obj,
2732 last_fixup_min_off)) {
2733 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2734 proc->pid, thread->pid);
2735 return -EINVAL;
2736 }
2737
2738 if (parent->length < sizeof(binder_uintptr_t) ||
2739 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2740 /* No space for a pointer here! */
2741 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2742 proc->pid, thread->pid);
2743 return -EINVAL;
2744 }
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002745 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002746 binder_alloc_get_user_buffer_offset(
2747 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002748 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2749
2750 return 0;
2751}
2752
Martijn Coenen053be422017-06-06 15:17:46 -07002753/**
2754 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2755 * @t: transaction to send
2756 * @proc: process to send the transaction to
2757 * @thread: thread in @proc to send the transaction to (may be NULL)
2758 *
2759 * This function queues a transaction to the specified process. It will try
2760 * to find a thread in the target process to handle the transaction and
2761 * wake it up. If no thread is found, the work is queued to the proc
2762 * waitqueue.
2763 *
2764 * If the @thread parameter is not NULL, the transaction is always queued
2765 * to the waitlist of that specific thread.
2766 *
2767 * Return: true if the transactions was successfully queued
2768 * false if the target process or thread is dead
2769 */
2770static bool binder_proc_transaction(struct binder_transaction *t,
2771 struct binder_proc *proc,
2772 struct binder_thread *thread)
2773{
Martijn Coenen053be422017-06-06 15:17:46 -07002774 struct binder_node *node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002775 struct binder_priority node_prio;
Martijn Coenen053be422017-06-06 15:17:46 -07002776 bool oneway = !!(t->flags & TF_ONE_WAY);
Martijn Coenen1af61802017-10-19 15:04:46 +02002777 bool pending_async = false;
Martijn Coenen053be422017-06-06 15:17:46 -07002778
2779 BUG_ON(!node);
2780 binder_node_lock(node);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002781 node_prio.prio = node->min_priority;
2782 node_prio.sched_policy = node->sched_policy;
2783
Martijn Coenen053be422017-06-06 15:17:46 -07002784 if (oneway) {
2785 BUG_ON(thread);
2786 if (node->has_async_transaction) {
Martijn Coenen1af61802017-10-19 15:04:46 +02002787 pending_async = true;
Martijn Coenen053be422017-06-06 15:17:46 -07002788 } else {
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -06002789 node->has_async_transaction = true;
Martijn Coenen053be422017-06-06 15:17:46 -07002790 }
2791 }
2792
2793 binder_inner_proc_lock(proc);
2794
2795 if (proc->is_dead || (thread && thread->is_dead)) {
2796 binder_inner_proc_unlock(proc);
2797 binder_node_unlock(node);
2798 return false;
2799 }
2800
Martijn Coenen1af61802017-10-19 15:04:46 +02002801 if (!thread && !pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002802 thread = binder_select_thread_ilocked(proc);
2803
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002804 if (thread) {
Martijn Coenenc46810c2017-06-23 10:13:43 -07002805 binder_transaction_priority(thread->task, t, node_prio,
2806 node->inherit_rt);
Martijn Coenen1af61802017-10-19 15:04:46 +02002807 binder_enqueue_thread_work_ilocked(thread, &t->work);
2808 } else if (!pending_async) {
2809 binder_enqueue_work_ilocked(&t->work, &proc->todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002810 } else {
Martijn Coenen1af61802017-10-19 15:04:46 +02002811 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002812 }
Martijn Coenen053be422017-06-06 15:17:46 -07002813
Martijn Coenen1af61802017-10-19 15:04:46 +02002814 if (!pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002815 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2816
2817 binder_inner_proc_unlock(proc);
2818 binder_node_unlock(node);
2819
2820 return true;
2821}
2822
Todd Kjos291d9682017-09-25 08:55:09 -07002823/**
2824 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2825 * @node: struct binder_node for which to get refs
2826 * @proc: returns @node->proc if valid
2827 * @error: if no @proc then returns BR_DEAD_REPLY
2828 *
2829 * User-space normally keeps the node alive when creating a transaction
2830 * since it has a reference to the target. The local strong ref keeps it
2831 * alive if the sending process dies before the target process processes
2832 * the transaction. If the source process is malicious or has a reference
2833 * counting bug, relying on the local strong ref can fail.
2834 *
2835 * Since user-space can cause the local strong ref to go away, we also take
2836 * a tmpref on the node to ensure it survives while we are constructing
2837 * the transaction. We also need a tmpref on the proc while we are
2838 * constructing the transaction, so we take that here as well.
2839 *
2840 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2841 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2842 * target proc has died, @error is set to BR_DEAD_REPLY
2843 */
2844static struct binder_node *binder_get_node_refs_for_txn(
2845 struct binder_node *node,
2846 struct binder_proc **procp,
2847 uint32_t *error)
2848{
2849 struct binder_node *target_node = NULL;
2850
2851 binder_node_inner_lock(node);
2852 if (node->proc) {
2853 target_node = node;
2854 binder_inc_node_nilocked(node, 1, 0, NULL);
2855 binder_inc_node_tmpref_ilocked(node);
2856 node->proc->tmp_ref++;
2857 *procp = node->proc;
2858 } else
2859 *error = BR_DEAD_REPLY;
2860 binder_node_inner_unlock(node);
2861
2862 return target_node;
2863}
2864
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002865static void binder_transaction(struct binder_proc *proc,
2866 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002867 struct binder_transaction_data *tr, int reply,
2868 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002869{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002870 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002871 struct binder_transaction *t;
2872 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002873 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002874 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002875 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002876 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002877 struct binder_thread *target_thread = NULL;
2878 struct binder_node *target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002879 struct binder_transaction *in_reply_to = NULL;
2880 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002881 uint32_t return_error = 0;
2882 uint32_t return_error_param = 0;
2883 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002884 struct binder_buffer_object *last_fixup_obj = NULL;
2885 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002886 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002887 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002888
2889 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002890 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002891 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2892 e->from_proc = proc->pid;
2893 e->from_thread = thread->pid;
2894 e->target_handle = tr->target.handle;
2895 e->data_size = tr->data_size;
2896 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002897 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002898
2899 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002900 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002901 in_reply_to = thread->transaction_stack;
2902 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002903 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302904 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002905 proc->pid, thread->pid);
2906 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002907 return_error_param = -EPROTO;
2908 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002909 goto err_empty_call_stack;
2910 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002911 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002912 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302913 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002914 proc->pid, thread->pid, in_reply_to->debug_id,
2915 in_reply_to->to_proc ?
2916 in_reply_to->to_proc->pid : 0,
2917 in_reply_to->to_thread ?
2918 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002919 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002920 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002921 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002922 return_error_param = -EPROTO;
2923 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002924 in_reply_to = NULL;
2925 goto err_bad_call_stack;
2926 }
2927 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002928 binder_inner_proc_unlock(proc);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002929 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002930 if (target_thread == NULL) {
2931 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002932 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002933 goto err_dead_binder;
2934 }
2935 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302936 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002937 proc->pid, thread->pid,
2938 target_thread->transaction_stack ?
2939 target_thread->transaction_stack->debug_id : 0,
2940 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002941 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002942 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002943 return_error_param = -EPROTO;
2944 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002945 in_reply_to = NULL;
2946 target_thread = NULL;
2947 goto err_dead_binder;
2948 }
2949 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002950 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002951 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002952 } else {
2953 if (tr->target.handle) {
2954 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002955
Todd Kjosc37162d2017-05-26 11:56:29 -07002956 /*
2957 * There must already be a strong ref
2958 * on this node. If so, do a strong
2959 * increment on the node to ensure it
2960 * stays alive until the transaction is
2961 * done.
2962 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002963 binder_proc_lock(proc);
2964 ref = binder_get_ref_olocked(proc, tr->target.handle,
2965 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002966 if (ref) {
Todd Kjos291d9682017-09-25 08:55:09 -07002967 target_node = binder_get_node_refs_for_txn(
2968 ref->node, &target_proc,
2969 &return_error);
2970 } else {
2971 binder_user_error("%d:%d got transaction to invalid handle\n",
2972 proc->pid, thread->pid);
2973 return_error = BR_FAILED_REPLY;
Todd Kjosc37162d2017-05-26 11:56:29 -07002974 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002975 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002976 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002977 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002978 target_node = context->binder_context_mgr_node;
Todd Kjos291d9682017-09-25 08:55:09 -07002979 if (target_node)
2980 target_node = binder_get_node_refs_for_txn(
2981 target_node, &target_proc,
2982 &return_error);
2983 else
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002984 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002985 mutex_unlock(&context->context_mgr_node_lock);
Martijn Coenenc4048b22018-03-28 11:14:50 +02002986 if (target_node && target_proc == proc) {
2987 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2988 proc->pid, thread->pid);
2989 return_error = BR_FAILED_REPLY;
2990 return_error_param = -EINVAL;
2991 return_error_line = __LINE__;
2992 goto err_invalid_target_handle;
2993 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002994 }
Todd Kjos291d9682017-09-25 08:55:09 -07002995 if (!target_node) {
2996 /*
2997 * return_error is set above
2998 */
2999 return_error_param = -EINVAL;
Todd Kjose598d172017-03-22 17:19:52 -07003000 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003001 goto err_dead_binder;
3002 }
Todd Kjos291d9682017-09-25 08:55:09 -07003003 e->to_node = target_node->debug_id;
Stephen Smalley79af7302015-01-21 10:54:10 -05003004 if (security_binder_transaction(proc->tsk,
3005 target_proc->tsk) < 0) {
3006 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003007 return_error_param = -EPERM;
3008 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05003009 goto err_invalid_target_handle;
3010 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003011 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003012 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3013 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003014
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003015 tmp = thread->transaction_stack;
3016 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003017 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05303018 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003019 proc->pid, thread->pid, tmp->debug_id,
3020 tmp->to_proc ? tmp->to_proc->pid : 0,
3021 tmp->to_thread ?
3022 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07003023 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003024 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003025 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003026 return_error_param = -EPROTO;
3027 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003028 goto err_bad_call_stack;
3029 }
3030 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003031 struct binder_thread *from;
3032
3033 spin_lock(&tmp->lock);
3034 from = tmp->from;
3035 if (from && from->proc == target_proc) {
3036 atomic_inc(&from->tmp_ref);
3037 target_thread = from;
3038 spin_unlock(&tmp->lock);
3039 break;
3040 }
3041 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003042 tmp = tmp->from_parent;
3043 }
3044 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003045 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003046 }
Martijn Coenen053be422017-06-06 15:17:46 -07003047 if (target_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003048 e->to_thread = target_thread->pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003049 e->to_proc = target_proc->pid;
3050
3051 /* TODO: reuse incoming transaction for reply */
3052 t = kzalloc(sizeof(*t), GFP_KERNEL);
3053 if (t == NULL) {
3054 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003055 return_error_param = -ENOMEM;
3056 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003057 goto err_alloc_t_failed;
3058 }
3059 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07003060 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003061
3062 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3063 if (tcomplete == NULL) {
3064 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003065 return_error_param = -ENOMEM;
3066 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003067 goto err_alloc_tcomplete_failed;
3068 }
3069 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3070
Todd Kjos1cfe6272017-05-24 13:33:28 -07003071 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003072
3073 if (reply)
3074 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003075 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003076 proc->pid, thread->pid, t->debug_id,
3077 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003078 (u64)tr->data.ptr.buffer,
3079 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003080 (u64)tr->data_size, (u64)tr->offsets_size,
3081 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003082 else
3083 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003084 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003085 proc->pid, thread->pid, t->debug_id,
3086 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003087 (u64)tr->data.ptr.buffer,
3088 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003089 (u64)tr->data_size, (u64)tr->offsets_size,
3090 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003091
3092 if (!reply && !(tr->flags & TF_ONE_WAY))
3093 t->from = thread;
3094 else
3095 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03003096 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003097 t->to_proc = target_proc;
3098 t->to_thread = target_thread;
3099 t->code = tr->code;
3100 t->flags = tr->flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07003101 if (!(t->flags & TF_ONE_WAY) &&
3102 binder_supported_policy(current->policy)) {
3103 /* Inherit supported policies for synchronous transactions */
3104 t->priority.sched_policy = current->policy;
3105 t->priority.prio = current->normal_prio;
3106 } else {
3107 /* Otherwise, fall back to the default priority */
3108 t->priority = target_proc->default_priority;
3109 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003110
3111 trace_binder_transaction(reply, t, target_node);
3112
Todd Kjosd325d372016-10-10 10:40:53 -07003113 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02003114 tr->offsets_size, extra_buffers_size,
3115 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07003116 if (IS_ERR(t->buffer)) {
3117 /*
3118 * -ESRCH indicates VMA cleared. The target is dying.
3119 */
3120 return_error_param = PTR_ERR(t->buffer);
3121 return_error = return_error_param == -ESRCH ?
3122 BR_DEAD_REPLY : BR_FAILED_REPLY;
3123 return_error_line = __LINE__;
3124 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003125 goto err_binder_alloc_buf_failed;
3126 }
3127 t->buffer->allow_user_free = 0;
3128 t->buffer->debug_id = t->debug_id;
3129 t->buffer->transaction = t;
3130 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003131 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003132 off_start = (binder_size_t *)(t->buffer->data +
3133 ALIGN(tr->data_size, sizeof(void *)));
3134 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003135
Arve Hjønnevågda498892014-02-21 14:40:26 -08003136 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3137 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303138 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3139 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003140 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003141 return_error_param = -EFAULT;
3142 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003143 goto err_copy_data_failed;
3144 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003145 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3146 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303147 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3148 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003149 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003150 return_error_param = -EFAULT;
3151 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003152 goto err_copy_data_failed;
3153 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003154 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3155 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3156 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003157 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003158 return_error_param = -EINVAL;
3159 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003160 goto err_bad_offset;
3161 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02003162 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3163 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3164 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05303165 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003166 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003167 return_error_param = -EINVAL;
3168 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003169 goto err_bad_offset;
3170 }
3171 off_end = (void *)off_start + tr->offsets_size;
3172 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3173 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003174 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003175 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02003176 struct binder_object_header *hdr;
3177 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09003178
Martijn Coenen00c80372016-07-13 12:06:49 +02003179 if (object_size == 0 || *offp < off_min) {
3180 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003181 proc->pid, thread->pid, (u64)*offp,
3182 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02003183 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003184 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003185 return_error_param = -EINVAL;
3186 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003187 goto err_bad_offset;
3188 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003189
3190 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3191 off_min = *offp + object_size;
3192 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003193 case BINDER_TYPE_BINDER:
3194 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003195 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003196
Martijn Coenen00c80372016-07-13 12:06:49 +02003197 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003198 ret = binder_translate_binder(fp, t, thread);
3199 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02003200 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003201 return_error_param = ret;
3202 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003203 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003204 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003205 } break;
3206 case BINDER_TYPE_HANDLE:
3207 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003208 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003209
Martijn Coenen00c80372016-07-13 12:06:49 +02003210 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003211 ret = binder_translate_handle(fp, t, thread);
3212 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003213 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003214 return_error_param = ret;
3215 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003216 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003217 }
3218 } break;
3219
3220 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003221 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003222 int target_fd = binder_translate_fd(fp->fd, t, thread,
3223 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003224
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003225 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003226 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003227 return_error_param = target_fd;
3228 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003229 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003230 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003231 fp->pad_binder = 0;
3232 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003233 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003234 case BINDER_TYPE_FDA: {
3235 struct binder_fd_array_object *fda =
3236 to_binder_fd_array_object(hdr);
3237 struct binder_buffer_object *parent =
3238 binder_validate_ptr(t->buffer, fda->parent,
3239 off_start,
3240 offp - off_start);
3241 if (!parent) {
3242 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3243 proc->pid, thread->pid);
3244 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003245 return_error_param = -EINVAL;
3246 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003247 goto err_bad_parent;
3248 }
3249 if (!binder_validate_fixup(t->buffer, off_start,
3250 parent, fda->parent_offset,
3251 last_fixup_obj,
3252 last_fixup_min_off)) {
3253 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3254 proc->pid, thread->pid);
3255 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003256 return_error_param = -EINVAL;
3257 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003258 goto err_bad_parent;
3259 }
3260 ret = binder_translate_fd_array(fda, parent, t, thread,
3261 in_reply_to);
3262 if (ret < 0) {
3263 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003264 return_error_param = ret;
3265 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003266 goto err_translate_failed;
3267 }
3268 last_fixup_obj = parent;
3269 last_fixup_min_off =
3270 fda->parent_offset + sizeof(u32) * fda->num_fds;
3271 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003272 case BINDER_TYPE_PTR: {
3273 struct binder_buffer_object *bp =
3274 to_binder_buffer_object(hdr);
3275 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003276
Martijn Coenen5a6da532016-09-30 14:10:07 +02003277 if (bp->length > buf_left) {
3278 binder_user_error("%d:%d got transaction with too large buffer\n",
3279 proc->pid, thread->pid);
3280 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003281 return_error_param = -EINVAL;
3282 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003283 goto err_bad_offset;
3284 }
3285 if (copy_from_user(sg_bufp,
3286 (const void __user *)(uintptr_t)
3287 bp->buffer, bp->length)) {
3288 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3289 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07003290 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003291 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003292 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003293 goto err_copy_data_failed;
3294 }
3295 /* Fixup buffer pointer to target proc address space */
3296 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07003297 binder_alloc_get_user_buffer_offset(
3298 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003299 sg_bufp += ALIGN(bp->length, sizeof(u64));
3300
3301 ret = binder_fixup_parent(t, thread, bp, off_start,
3302 offp - off_start,
3303 last_fixup_obj,
3304 last_fixup_min_off);
3305 if (ret < 0) {
3306 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003307 return_error_param = ret;
3308 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003309 goto err_translate_failed;
3310 }
3311 last_fixup_obj = bp;
3312 last_fixup_min_off = 0;
3313 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003314 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01003315 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02003316 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003317 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003318 return_error_param = -EINVAL;
3319 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003320 goto err_bad_object_type;
3321 }
3322 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003323 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003324 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003325
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003326 if (reply) {
Martijn Coenen1af61802017-10-19 15:04:46 +02003327 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003328 binder_inner_proc_lock(target_proc);
3329 if (target_thread->is_dead) {
3330 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003331 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003332 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003333 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003334 binder_pop_transaction_ilocked(target_thread, in_reply_to);
Martijn Coenen1af61802017-10-19 15:04:46 +02003335 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003336 binder_inner_proc_unlock(target_proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003337 wake_up_interruptible_sync(&target_thread->wait);
Martijn Coenenecd972d2017-05-26 10:48:56 -07003338 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003339 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003340 } else if (!(t->flags & TF_ONE_WAY)) {
3341 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003342 binder_inner_proc_lock(proc);
Martijn Coenendac2e9c2017-11-13 09:55:21 +01003343 /*
3344 * Defer the TRANSACTION_COMPLETE, so we don't return to
3345 * userspace immediately; this allows the target process to
3346 * immediately start processing this transaction, reducing
3347 * latency. We will then return the TRANSACTION_COMPLETE when
3348 * the target replies (or there is an error).
3349 */
3350 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003351 t->need_reply = 1;
3352 t->from_parent = thread->transaction_stack;
3353 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003354 binder_inner_proc_unlock(proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003355 if (!binder_proc_transaction(t, target_proc, target_thread)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003356 binder_inner_proc_lock(proc);
3357 binder_pop_transaction_ilocked(thread, t);
3358 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003359 goto err_dead_proc_or_thread;
3360 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003361 } else {
3362 BUG_ON(target_node == NULL);
3363 BUG_ON(t->buffer->async_transaction != 1);
Martijn Coenen1af61802017-10-19 15:04:46 +02003364 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen053be422017-06-06 15:17:46 -07003365 if (!binder_proc_transaction(t, target_proc, NULL))
Todd Kjos2f993e22017-05-12 14:42:55 -07003366 goto err_dead_proc_or_thread;
Riley Andrewsb5968812015-09-01 12:42:07 -07003367 }
Todd Kjos2f993e22017-05-12 14:42:55 -07003368 if (target_thread)
3369 binder_thread_dec_tmpref(target_thread);
3370 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003371 if (target_node)
3372 binder_dec_node_tmpref(target_node);
Todd Kjos1cfe6272017-05-24 13:33:28 -07003373 /*
3374 * write barrier to synchronize with initialization
3375 * of log entry
3376 */
3377 smp_wmb();
3378 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003379 return;
3380
Todd Kjos2f993e22017-05-12 14:42:55 -07003381err_dead_proc_or_thread:
3382 return_error = BR_DEAD_REPLY;
3383 return_error_line = __LINE__;
Xu YiPing86578a02017-05-22 11:26:23 -07003384 binder_dequeue_work(proc, tcomplete);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003385err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003386err_bad_object_type:
3387err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003388err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003389err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003390 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003391 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjos291d9682017-09-25 08:55:09 -07003392 if (target_node)
3393 binder_dec_node_tmpref(target_node);
Todd Kjosc37162d2017-05-26 11:56:29 -07003394 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003395 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07003396 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003397err_binder_alloc_buf_failed:
3398 kfree(tcomplete);
3399 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3400err_alloc_tcomplete_failed:
3401 kfree(t);
3402 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3403err_alloc_t_failed:
3404err_bad_call_stack:
3405err_empty_call_stack:
3406err_dead_binder:
3407err_invalid_target_handle:
Todd Kjos2f993e22017-05-12 14:42:55 -07003408 if (target_thread)
3409 binder_thread_dec_tmpref(target_thread);
3410 if (target_proc)
3411 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003412 if (target_node) {
Todd Kjosc37162d2017-05-26 11:56:29 -07003413 binder_dec_node(target_node, 1, 0);
Todd Kjos291d9682017-09-25 08:55:09 -07003414 binder_dec_node_tmpref(target_node);
3415 }
Todd Kjosc37162d2017-05-26 11:56:29 -07003416
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003417 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07003418 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3419 proc->pid, thread->pid, return_error, return_error_param,
3420 (u64)tr->data_size, (u64)tr->offsets_size,
3421 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003422
3423 {
3424 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09003425
Todd Kjose598d172017-03-22 17:19:52 -07003426 e->return_error = return_error;
3427 e->return_error_param = return_error_param;
3428 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003429 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3430 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07003431 /*
3432 * write barrier to synchronize with initialization
3433 * of log entry
3434 */
3435 smp_wmb();
3436 WRITE_ONCE(e->debug_id_done, t_debug_id);
3437 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003438 }
3439
Todd Kjos858b8da2017-04-21 17:35:12 -07003440 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003441 if (in_reply_to) {
Martijn Coenenecd972d2017-05-26 10:48:56 -07003442 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos858b8da2017-04-21 17:35:12 -07003443 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Martijn Coenen1af61802017-10-19 15:04:46 +02003444 binder_enqueue_thread_work(thread, &thread->return_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003445 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07003446 } else {
3447 thread->return_error.cmd = return_error;
Martijn Coenen1af61802017-10-19 15:04:46 +02003448 binder_enqueue_thread_work(thread, &thread->return_error.work);
Todd Kjos858b8da2017-04-21 17:35:12 -07003449 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003450}
3451
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003452static int binder_thread_write(struct binder_proc *proc,
3453 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003454 binder_uintptr_t binder_buffer, size_t size,
3455 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003456{
3457 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003458 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003459 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003460 void __user *ptr = buffer + *consumed;
3461 void __user *end = buffer + size;
3462
Todd Kjos858b8da2017-04-21 17:35:12 -07003463 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003464 int ret;
3465
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003466 if (get_user(cmd, (uint32_t __user *)ptr))
3467 return -EFAULT;
3468 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003469 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003470 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003471 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3472 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3473 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003474 }
3475 switch (cmd) {
3476 case BC_INCREFS:
3477 case BC_ACQUIRE:
3478 case BC_RELEASE:
3479 case BC_DECREFS: {
3480 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003481 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003482 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3483 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3484 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003485
3486 if (get_user(target, (uint32_t __user *)ptr))
3487 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003488
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003489 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003490 ret = -1;
3491 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003492 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003493 mutex_lock(&context->context_mgr_node_lock);
3494 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003495 if (ctx_mgr_node)
3496 ret = binder_inc_ref_for_node(
3497 proc, ctx_mgr_node,
3498 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003499 mutex_unlock(&context->context_mgr_node_lock);
3500 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003501 if (ret)
3502 ret = binder_update_ref_for_handle(
3503 proc, target, increment, strong,
3504 &rdata);
3505 if (!ret && rdata.desc != target) {
3506 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3507 proc->pid, thread->pid,
3508 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003509 }
3510 switch (cmd) {
3511 case BC_INCREFS:
3512 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003513 break;
3514 case BC_ACQUIRE:
3515 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003516 break;
3517 case BC_RELEASE:
3518 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003519 break;
3520 case BC_DECREFS:
3521 default:
3522 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003523 break;
3524 }
3525 if (ret) {
3526 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3527 proc->pid, thread->pid, debug_string,
3528 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003529 break;
3530 }
3531 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003532 "%d:%d %s ref %d desc %d s %d w %d\n",
3533 proc->pid, thread->pid, debug_string,
3534 rdata.debug_id, rdata.desc, rdata.strong,
3535 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003536 break;
3537 }
3538 case BC_INCREFS_DONE:
3539 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003540 binder_uintptr_t node_ptr;
3541 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003542 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003543 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003544
Arve Hjønnevågda498892014-02-21 14:40:26 -08003545 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003546 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003547 ptr += sizeof(binder_uintptr_t);
3548 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003549 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003550 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003551 node = binder_get_node(proc, node_ptr);
3552 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003553 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003554 proc->pid, thread->pid,
3555 cmd == BC_INCREFS_DONE ?
3556 "BC_INCREFS_DONE" :
3557 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003558 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003559 break;
3560 }
3561 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003562 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003563 proc->pid, thread->pid,
3564 cmd == BC_INCREFS_DONE ?
3565 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003566 (u64)node_ptr, node->debug_id,
3567 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003568 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003569 break;
3570 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003571 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003572 if (cmd == BC_ACQUIRE_DONE) {
3573 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303574 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003575 proc->pid, thread->pid,
3576 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003577 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003578 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003579 break;
3580 }
3581 node->pending_strong_ref = 0;
3582 } else {
3583 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303584 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003585 proc->pid, thread->pid,
3586 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003587 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003588 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003589 break;
3590 }
3591 node->pending_weak_ref = 0;
3592 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003593 free_node = binder_dec_node_nilocked(node,
3594 cmd == BC_ACQUIRE_DONE, 0);
3595 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003596 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003597 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003598 proc->pid, thread->pid,
3599 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003600 node->debug_id, node->local_strong_refs,
3601 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003602 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003603 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003604 break;
3605 }
3606 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303607 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003608 return -EINVAL;
3609 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303610 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003611 return -EINVAL;
3612
3613 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003614 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003615 struct binder_buffer *buffer;
3616
Arve Hjønnevågda498892014-02-21 14:40:26 -08003617 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003618 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003619 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003620
Todd Kjos076072a2017-04-21 14:32:11 -07003621 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3622 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003623 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003624 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3625 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003626 break;
3627 }
3628 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003629 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3630 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003631 break;
3632 }
3633 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003634 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3635 proc->pid, thread->pid, (u64)data_ptr,
3636 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003637 buffer->transaction ? "active" : "finished");
3638
3639 if (buffer->transaction) {
3640 buffer->transaction->buffer = NULL;
3641 buffer->transaction = NULL;
3642 }
3643 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003644 struct binder_node *buf_node;
3645 struct binder_work *w;
3646
3647 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003648 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003649 BUG_ON(!buf_node->has_async_transaction);
3650 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003651 w = binder_dequeue_work_head_ilocked(
3652 &buf_node->async_todo);
Martijn Coenen4501c042017-08-10 13:56:16 +02003653 if (!w) {
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -06003654 buf_node->has_async_transaction = false;
Martijn Coenen4501c042017-08-10 13:56:16 +02003655 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003656 binder_enqueue_work_ilocked(
Martijn Coenen4501c042017-08-10 13:56:16 +02003657 w, &proc->todo);
3658 binder_wakeup_proc_ilocked(proc);
3659 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003660 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003661 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003662 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003663 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003664 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003665 break;
3666 }
3667
Martijn Coenen5a6da532016-09-30 14:10:07 +02003668 case BC_TRANSACTION_SG:
3669 case BC_REPLY_SG: {
3670 struct binder_transaction_data_sg tr;
3671
3672 if (copy_from_user(&tr, ptr, sizeof(tr)))
3673 return -EFAULT;
3674 ptr += sizeof(tr);
3675 binder_transaction(proc, thread, &tr.transaction_data,
3676 cmd == BC_REPLY_SG, tr.buffers_size);
3677 break;
3678 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003679 case BC_TRANSACTION:
3680 case BC_REPLY: {
3681 struct binder_transaction_data tr;
3682
3683 if (copy_from_user(&tr, ptr, sizeof(tr)))
3684 return -EFAULT;
3685 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003686 binder_transaction(proc, thread, &tr,
3687 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003688 break;
3689 }
3690
3691 case BC_REGISTER_LOOPER:
3692 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303693 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003694 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003695 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003696 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3697 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303698 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003699 proc->pid, thread->pid);
3700 } else if (proc->requested_threads == 0) {
3701 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303702 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003703 proc->pid, thread->pid);
3704 } else {
3705 proc->requested_threads--;
3706 proc->requested_threads_started++;
3707 }
3708 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003709 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003710 break;
3711 case BC_ENTER_LOOPER:
3712 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303713 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003714 proc->pid, thread->pid);
3715 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3716 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303717 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003718 proc->pid, thread->pid);
3719 }
3720 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3721 break;
3722 case BC_EXIT_LOOPER:
3723 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303724 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003725 proc->pid, thread->pid);
3726 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3727 break;
3728
3729 case BC_REQUEST_DEATH_NOTIFICATION:
3730 case BC_CLEAR_DEATH_NOTIFICATION: {
3731 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003732 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003733 struct binder_ref *ref;
Todd Kjos5346bf32016-10-20 16:43:34 -07003734 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003735
3736 if (get_user(target, (uint32_t __user *)ptr))
3737 return -EFAULT;
3738 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003739 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003740 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003741 ptr += sizeof(binder_uintptr_t);
Todd Kjos5346bf32016-10-20 16:43:34 -07003742 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3743 /*
3744 * Allocate memory for death notification
3745 * before taking lock
3746 */
3747 death = kzalloc(sizeof(*death), GFP_KERNEL);
3748 if (death == NULL) {
3749 WARN_ON(thread->return_error.cmd !=
3750 BR_OK);
3751 thread->return_error.cmd = BR_ERROR;
Martijn Coenen1af61802017-10-19 15:04:46 +02003752 binder_enqueue_thread_work(
3753 thread,
3754 &thread->return_error.work);
Todd Kjos5346bf32016-10-20 16:43:34 -07003755 binder_debug(
3756 BINDER_DEBUG_FAILED_TRANSACTION,
3757 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3758 proc->pid, thread->pid);
3759 break;
3760 }
3761 }
3762 binder_proc_lock(proc);
3763 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003764 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303765 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003766 proc->pid, thread->pid,
3767 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3768 "BC_REQUEST_DEATH_NOTIFICATION" :
3769 "BC_CLEAR_DEATH_NOTIFICATION",
3770 target);
Todd Kjos5346bf32016-10-20 16:43:34 -07003771 binder_proc_unlock(proc);
3772 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003773 break;
3774 }
3775
3776 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003777 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003778 proc->pid, thread->pid,
3779 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3780 "BC_REQUEST_DEATH_NOTIFICATION" :
3781 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003782 (u64)cookie, ref->data.debug_id,
3783 ref->data.desc, ref->data.strong,
3784 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003785
Martijn Coenenf9eac642017-05-22 11:26:23 -07003786 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003787 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3788 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303789 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003790 proc->pid, thread->pid);
Martijn Coenenf9eac642017-05-22 11:26:23 -07003791 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003792 binder_proc_unlock(proc);
3793 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003794 break;
3795 }
3796 binder_stats_created(BINDER_STAT_DEATH);
3797 INIT_LIST_HEAD(&death->work.entry);
3798 death->cookie = cookie;
3799 ref->death = death;
3800 if (ref->node->proc == NULL) {
3801 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Martijn Coenen3bdbe4c2017-08-10 13:50:52 +02003802
3803 binder_inner_proc_lock(proc);
3804 binder_enqueue_work_ilocked(
3805 &ref->death->work, &proc->todo);
3806 binder_wakeup_proc_ilocked(proc);
3807 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003808 }
3809 } else {
3810 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303811 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003812 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003813 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003814 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003815 break;
3816 }
3817 death = ref->death;
3818 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003819 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003820 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003821 (u64)death->cookie,
3822 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003823 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003824 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003825 break;
3826 }
3827 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003828 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003829 if (list_empty(&death->work.entry)) {
3830 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003831 if (thread->looper &
3832 (BINDER_LOOPER_STATE_REGISTERED |
3833 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003834 binder_enqueue_thread_work_ilocked(
3835 thread,
3836 &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003837 else {
3838 binder_enqueue_work_ilocked(
3839 &death->work,
3840 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003841 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003842 proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003843 }
3844 } else {
3845 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3846 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3847 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003848 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003849 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07003850 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003851 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003852 } break;
3853 case BC_DEAD_BINDER_DONE: {
3854 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003855 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003856 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003857
Arve Hjønnevågda498892014-02-21 14:40:26 -08003858 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003859 return -EFAULT;
3860
Lisa Du7a64cd82016-02-17 09:32:52 +08003861 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003862 binder_inner_proc_lock(proc);
3863 list_for_each_entry(w, &proc->delivered_death,
3864 entry) {
3865 struct binder_ref_death *tmp_death =
3866 container_of(w,
3867 struct binder_ref_death,
3868 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003869
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003870 if (tmp_death->cookie == cookie) {
3871 death = tmp_death;
3872 break;
3873 }
3874 }
3875 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003876 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3877 proc->pid, thread->pid, (u64)cookie,
3878 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003879 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003880 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3881 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003882 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003883 break;
3884 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003885 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003886 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3887 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003888 if (thread->looper &
3889 (BINDER_LOOPER_STATE_REGISTERED |
3890 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003891 binder_enqueue_thread_work_ilocked(
3892 thread, &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003893 else {
3894 binder_enqueue_work_ilocked(
3895 &death->work,
3896 &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07003897 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003898 }
3899 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003900 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003901 } break;
3902
3903 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303904 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003905 proc->pid, thread->pid, cmd);
3906 return -EINVAL;
3907 }
3908 *consumed = ptr - buffer;
3909 }
3910 return 0;
3911}
3912
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003913static void binder_stat_br(struct binder_proc *proc,
3914 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003915{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003916 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003917 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003918 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3919 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3920 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003921 }
3922}
3923
Todd Kjos60792612017-05-24 10:51:01 -07003924static int binder_put_node_cmd(struct binder_proc *proc,
3925 struct binder_thread *thread,
3926 void __user **ptrp,
3927 binder_uintptr_t node_ptr,
3928 binder_uintptr_t node_cookie,
3929 int node_debug_id,
3930 uint32_t cmd, const char *cmd_name)
3931{
3932 void __user *ptr = *ptrp;
3933
3934 if (put_user(cmd, (uint32_t __user *)ptr))
3935 return -EFAULT;
3936 ptr += sizeof(uint32_t);
3937
3938 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3939 return -EFAULT;
3940 ptr += sizeof(binder_uintptr_t);
3941
3942 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3943 return -EFAULT;
3944 ptr += sizeof(binder_uintptr_t);
3945
3946 binder_stat_br(proc, thread, cmd);
3947 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3948 proc->pid, thread->pid, cmd_name, node_debug_id,
3949 (u64)node_ptr, (u64)node_cookie);
3950
3951 *ptrp = ptr;
3952 return 0;
3953}
3954
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003955static int binder_wait_for_work(struct binder_thread *thread,
3956 bool do_proc_work)
3957{
3958 DEFINE_WAIT(wait);
3959 struct binder_proc *proc = thread->proc;
3960 int ret = 0;
3961
3962 freezer_do_not_count();
3963 binder_inner_proc_lock(proc);
3964 for (;;) {
3965 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3966 if (binder_has_work_ilocked(thread, do_proc_work))
3967 break;
3968 if (do_proc_work)
3969 list_add(&thread->waiting_thread_node,
3970 &proc->waiting_threads);
3971 binder_inner_proc_unlock(proc);
3972 schedule();
3973 binder_inner_proc_lock(proc);
3974 list_del_init(&thread->waiting_thread_node);
3975 if (signal_pending(current)) {
3976 ret = -ERESTARTSYS;
3977 break;
3978 }
3979 }
3980 finish_wait(&thread->wait, &wait);
3981 binder_inner_proc_unlock(proc);
3982 freezer_count();
3983
3984 return ret;
3985}
3986
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003987static int binder_thread_read(struct binder_proc *proc,
3988 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003989 binder_uintptr_t binder_buffer, size_t size,
3990 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003991{
Arve Hjønnevågda498892014-02-21 14:40:26 -08003992 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003993 void __user *ptr = buffer + *consumed;
3994 void __user *end = buffer + size;
3995
3996 int ret = 0;
3997 int wait_for_proc_work;
3998
3999 if (*consumed == 0) {
4000 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4001 return -EFAULT;
4002 ptr += sizeof(uint32_t);
4003 }
4004
4005retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07004006 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004007 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07004008 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004009
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004010 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004011
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004012 trace_binder_wait_for_work(wait_for_proc_work,
4013 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004014 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004015 if (wait_for_proc_work) {
4016 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4017 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304018 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004019 proc->pid, thread->pid, thread->looper);
4020 wait_event_interruptible(binder_user_error_wait,
4021 binder_stop_on_user_error < 2);
4022 }
Martijn Coenenecd972d2017-05-26 10:48:56 -07004023 binder_restore_priority(current, proc->default_priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004024 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004025
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004026 if (non_block) {
4027 if (!binder_has_work(thread, wait_for_proc_work))
4028 ret = -EAGAIN;
4029 } else {
4030 ret = binder_wait_for_work(thread, wait_for_proc_work);
4031 }
4032
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004033 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4034
4035 if (ret)
4036 return ret;
4037
4038 while (1) {
4039 uint32_t cmd;
4040 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004041 struct binder_work *w = NULL;
4042 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004043 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07004044 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004045
Todd Kjose7f23ed2017-03-21 13:06:01 -07004046 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004047 if (!binder_worklist_empty_ilocked(&thread->todo))
4048 list = &thread->todo;
4049 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4050 wait_for_proc_work)
4051 list = &proc->todo;
4052 else {
4053 binder_inner_proc_unlock(proc);
4054
Dmitry Voytik395262a2014-09-08 18:16:34 +04004055 /* no data added */
Todd Kjos6798e6d2017-01-06 14:19:25 -08004056 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004057 goto retry;
4058 break;
4059 }
4060
Todd Kjose7f23ed2017-03-21 13:06:01 -07004061 if (end - ptr < sizeof(tr) + 4) {
4062 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004063 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004064 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004065 w = binder_dequeue_work_head_ilocked(list);
Martijn Coenen1af61802017-10-19 15:04:46 +02004066 if (binder_worklist_empty_ilocked(&thread->todo))
4067 thread->process_todo = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004068
4069 switch (w->type) {
4070 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004071 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004072 t = container_of(w, struct binder_transaction, work);
4073 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004074 case BINDER_WORK_RETURN_ERROR: {
4075 struct binder_error *e = container_of(
4076 w, struct binder_error, work);
4077
4078 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004079 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07004080 if (put_user(e->cmd, (uint32_t __user *)ptr))
4081 return -EFAULT;
宋金时e1b1a8b2018-05-10 02:05:03 +00004082 cmd = e->cmd;
Todd Kjos858b8da2017-04-21 17:35:12 -07004083 e->cmd = BR_OK;
4084 ptr += sizeof(uint32_t);
4085
4086 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07004087 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004088 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004089 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004090 cmd = BR_TRANSACTION_COMPLETE;
4091 if (put_user(cmd, (uint32_t __user *)ptr))
4092 return -EFAULT;
4093 ptr += sizeof(uint32_t);
4094
4095 binder_stat_br(proc, thread, cmd);
4096 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304097 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004098 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004099 kfree(w);
4100 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4101 } break;
4102 case BINDER_WORK_NODE: {
4103 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07004104 int strong, weak;
4105 binder_uintptr_t node_ptr = node->ptr;
4106 binder_uintptr_t node_cookie = node->cookie;
4107 int node_debug_id = node->debug_id;
4108 int has_weak_ref;
4109 int has_strong_ref;
4110 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09004111
Todd Kjos60792612017-05-24 10:51:01 -07004112 BUG_ON(proc != node->proc);
4113 strong = node->internal_strong_refs ||
4114 node->local_strong_refs;
4115 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07004116 node->local_weak_refs ||
4117 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07004118 has_strong_ref = node->has_strong_ref;
4119 has_weak_ref = node->has_weak_ref;
4120
4121 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004122 node->has_weak_ref = 1;
4123 node->pending_weak_ref = 1;
4124 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004125 }
4126 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004127 node->has_strong_ref = 1;
4128 node->pending_strong_ref = 1;
4129 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004130 }
4131 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004132 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004133 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004134 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004135 if (!weak && !strong) {
4136 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4137 "%d:%d node %d u%016llx c%016llx deleted\n",
4138 proc->pid, thread->pid,
4139 node_debug_id,
4140 (u64)node_ptr,
4141 (u64)node_cookie);
4142 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004143 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004144 binder_node_lock(node);
4145 /*
4146 * Acquire the node lock before freeing the
4147 * node to serialize with other threads that
4148 * may have been holding the node lock while
4149 * decrementing this node (avoids race where
4150 * this thread frees while the other thread
4151 * is unlocking the node after the final
4152 * decrement)
4153 */
4154 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004155 binder_free_node(node);
4156 } else
4157 binder_inner_proc_unlock(proc);
4158
Todd Kjos60792612017-05-24 10:51:01 -07004159 if (weak && !has_weak_ref)
4160 ret = binder_put_node_cmd(
4161 proc, thread, &ptr, node_ptr,
4162 node_cookie, node_debug_id,
4163 BR_INCREFS, "BR_INCREFS");
4164 if (!ret && strong && !has_strong_ref)
4165 ret = binder_put_node_cmd(
4166 proc, thread, &ptr, node_ptr,
4167 node_cookie, node_debug_id,
4168 BR_ACQUIRE, "BR_ACQUIRE");
4169 if (!ret && !strong && has_strong_ref)
4170 ret = binder_put_node_cmd(
4171 proc, thread, &ptr, node_ptr,
4172 node_cookie, node_debug_id,
4173 BR_RELEASE, "BR_RELEASE");
4174 if (!ret && !weak && has_weak_ref)
4175 ret = binder_put_node_cmd(
4176 proc, thread, &ptr, node_ptr,
4177 node_cookie, node_debug_id,
4178 BR_DECREFS, "BR_DECREFS");
4179 if (orig_ptr == ptr)
4180 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4181 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4182 proc->pid, thread->pid,
4183 node_debug_id,
4184 (u64)node_ptr,
4185 (u64)node_cookie);
4186 if (ret)
4187 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004188 } break;
4189 case BINDER_WORK_DEAD_BINDER:
4190 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4191 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4192 struct binder_ref_death *death;
4193 uint32_t cmd;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004194 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004195
4196 death = container_of(w, struct binder_ref_death, work);
4197 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4198 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4199 else
4200 cmd = BR_DEAD_BINDER;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004201 cookie = death->cookie;
4202
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004203 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004204 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004205 proc->pid, thread->pid,
4206 cmd == BR_DEAD_BINDER ?
4207 "BR_DEAD_BINDER" :
4208 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Martijn Coenenf9eac642017-05-22 11:26:23 -07004209 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004210 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Martijn Coenenf9eac642017-05-22 11:26:23 -07004211 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004212 kfree(death);
4213 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004214 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004215 binder_enqueue_work_ilocked(
4216 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004217 binder_inner_proc_unlock(proc);
4218 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07004219 if (put_user(cmd, (uint32_t __user *)ptr))
4220 return -EFAULT;
4221 ptr += sizeof(uint32_t);
4222 if (put_user(cookie,
4223 (binder_uintptr_t __user *)ptr))
4224 return -EFAULT;
4225 ptr += sizeof(binder_uintptr_t);
4226 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004227 if (cmd == BR_DEAD_BINDER)
4228 goto done; /* DEAD_BINDER notifications can cause transactions */
4229 } break;
4230 }
4231
4232 if (!t)
4233 continue;
4234
4235 BUG_ON(t->buffer == NULL);
4236 if (t->buffer->target_node) {
4237 struct binder_node *target_node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004238 struct binder_priority node_prio;
Seunghun Lee10f62862014-05-01 01:30:23 +09004239
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004240 tr.target.ptr = target_node->ptr;
4241 tr.cookie = target_node->cookie;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004242 node_prio.sched_policy = target_node->sched_policy;
4243 node_prio.prio = target_node->min_priority;
Martijn Coenenc46810c2017-06-23 10:13:43 -07004244 binder_transaction_priority(current, t, node_prio,
4245 target_node->inherit_rt);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004246 cmd = BR_TRANSACTION;
4247 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08004248 tr.target.ptr = 0;
4249 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004250 cmd = BR_REPLY;
4251 }
4252 tr.code = t->code;
4253 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06004254 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004255
Todd Kjos2f993e22017-05-12 14:42:55 -07004256 t_from = binder_get_txn_from(t);
4257 if (t_from) {
4258 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09004259
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004260 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08004261 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004262 } else {
4263 tr.sender_pid = 0;
4264 }
4265
4266 tr.data_size = t->buffer->data_size;
4267 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07004268 tr.data.ptr.buffer = (binder_uintptr_t)
4269 ((uintptr_t)t->buffer->data +
4270 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004271 tr.data.ptr.offsets = tr.data.ptr.buffer +
4272 ALIGN(t->buffer->data_size,
4273 sizeof(void *));
4274
Todd Kjos2f993e22017-05-12 14:42:55 -07004275 if (put_user(cmd, (uint32_t __user *)ptr)) {
4276 if (t_from)
4277 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004278
4279 binder_cleanup_transaction(t, "put_user failed",
4280 BR_FAILED_REPLY);
4281
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004282 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004283 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004284 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07004285 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4286 if (t_from)
4287 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004288
4289 binder_cleanup_transaction(t, "copy_to_user failed",
4290 BR_FAILED_REPLY);
4291
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004292 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004293 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004294 ptr += sizeof(tr);
4295
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004296 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004297 binder_stat_br(proc, thread, cmd);
4298 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004299 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004300 proc->pid, thread->pid,
4301 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4302 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07004303 t->debug_id, t_from ? t_from->proc->pid : 0,
4304 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004305 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004306 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004307
Todd Kjos2f993e22017-05-12 14:42:55 -07004308 if (t_from)
4309 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004310 t->buffer->allow_user_free = 1;
4311 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07004312 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004313 t->to_parent = thread->transaction_stack;
4314 t->to_thread = thread;
4315 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07004316 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004317 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07004318 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004319 }
4320 break;
4321 }
4322
4323done:
4324
4325 *consumed = ptr - buffer;
Todd Kjosd600e902017-05-25 17:35:02 -07004326 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004327 if (proc->requested_threads == 0 &&
4328 list_empty(&thread->proc->waiting_threads) &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004329 proc->requested_threads_started < proc->max_threads &&
4330 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4331 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4332 /*spawn a new thread if we leave this out */) {
4333 proc->requested_threads++;
Todd Kjosd600e902017-05-25 17:35:02 -07004334 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004335 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304336 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004337 proc->pid, thread->pid);
4338 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4339 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07004340 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Todd Kjosd600e902017-05-25 17:35:02 -07004341 } else
4342 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004343 return 0;
4344}
4345
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004346static void binder_release_work(struct binder_proc *proc,
4347 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004348{
4349 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09004350
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004351 while (1) {
4352 w = binder_dequeue_work_head(proc, list);
4353 if (!w)
4354 return;
4355
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004356 switch (w->type) {
4357 case BINDER_WORK_TRANSACTION: {
4358 struct binder_transaction *t;
4359
4360 t = container_of(w, struct binder_transaction, work);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004361
4362 binder_cleanup_transaction(t, "process died.",
4363 BR_DEAD_REPLY);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004364 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004365 case BINDER_WORK_RETURN_ERROR: {
4366 struct binder_error *e = container_of(
4367 w, struct binder_error, work);
4368
4369 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4370 "undelivered TRANSACTION_ERROR: %u\n",
4371 e->cmd);
4372 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004373 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004374 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304375 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004376 kfree(w);
4377 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4378 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004379 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4380 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4381 struct binder_ref_death *death;
4382
4383 death = container_of(w, struct binder_ref_death, work);
4384 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004385 "undelivered death notification, %016llx\n",
4386 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004387 kfree(death);
4388 binder_stats_deleted(BINDER_STAT_DEATH);
4389 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004390 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304391 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004392 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004393 break;
4394 }
4395 }
4396
4397}
4398
Todd Kjosb4827902017-05-25 15:52:17 -07004399static struct binder_thread *binder_get_thread_ilocked(
4400 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004401{
4402 struct binder_thread *thread = NULL;
4403 struct rb_node *parent = NULL;
4404 struct rb_node **p = &proc->threads.rb_node;
4405
4406 while (*p) {
4407 parent = *p;
4408 thread = rb_entry(parent, struct binder_thread, rb_node);
4409
4410 if (current->pid < thread->pid)
4411 p = &(*p)->rb_left;
4412 else if (current->pid > thread->pid)
4413 p = &(*p)->rb_right;
4414 else
Todd Kjosb4827902017-05-25 15:52:17 -07004415 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004416 }
Todd Kjosb4827902017-05-25 15:52:17 -07004417 if (!new_thread)
4418 return NULL;
4419 thread = new_thread;
4420 binder_stats_created(BINDER_STAT_THREAD);
4421 thread->proc = proc;
4422 thread->pid = current->pid;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004423 get_task_struct(current);
4424 thread->task = current;
Todd Kjosb4827902017-05-25 15:52:17 -07004425 atomic_set(&thread->tmp_ref, 0);
4426 init_waitqueue_head(&thread->wait);
4427 INIT_LIST_HEAD(&thread->todo);
4428 rb_link_node(&thread->rb_node, parent, p);
4429 rb_insert_color(&thread->rb_node, &proc->threads);
4430 thread->looper_need_return = true;
4431 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4432 thread->return_error.cmd = BR_OK;
4433 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4434 thread->reply_error.cmd = BR_OK;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004435 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004436 return thread;
4437}
4438
4439static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4440{
4441 struct binder_thread *thread;
4442 struct binder_thread *new_thread;
4443
4444 binder_inner_proc_lock(proc);
4445 thread = binder_get_thread_ilocked(proc, NULL);
4446 binder_inner_proc_unlock(proc);
4447 if (!thread) {
4448 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4449 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004450 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07004451 binder_inner_proc_lock(proc);
4452 thread = binder_get_thread_ilocked(proc, new_thread);
4453 binder_inner_proc_unlock(proc);
4454 if (thread != new_thread)
4455 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004456 }
4457 return thread;
4458}
4459
Todd Kjos2f993e22017-05-12 14:42:55 -07004460static void binder_free_proc(struct binder_proc *proc)
4461{
4462 BUG_ON(!list_empty(&proc->todo));
4463 BUG_ON(!list_empty(&proc->delivered_death));
4464 binder_alloc_deferred_release(&proc->alloc);
4465 put_task_struct(proc->tsk);
4466 binder_stats_deleted(BINDER_STAT_PROC);
4467 kfree(proc);
4468}
4469
4470static void binder_free_thread(struct binder_thread *thread)
4471{
4472 BUG_ON(!list_empty(&thread->todo));
4473 binder_stats_deleted(BINDER_STAT_THREAD);
4474 binder_proc_dec_tmpref(thread->proc);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004475 put_task_struct(thread->task);
Todd Kjos2f993e22017-05-12 14:42:55 -07004476 kfree(thread);
4477}
4478
4479static int binder_thread_release(struct binder_proc *proc,
4480 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004481{
4482 struct binder_transaction *t;
4483 struct binder_transaction *send_reply = NULL;
4484 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07004485 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004486
Todd Kjosb4827902017-05-25 15:52:17 -07004487 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004488 /*
4489 * take a ref on the proc so it survives
4490 * after we remove this thread from proc->threads.
4491 * The corresponding dec is when we actually
4492 * free the thread in binder_free_thread()
4493 */
4494 proc->tmp_ref++;
4495 /*
4496 * take a ref on this thread to ensure it
4497 * survives while we are releasing it
4498 */
4499 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004500 rb_erase(&thread->rb_node, &proc->threads);
4501 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07004502 if (t) {
4503 spin_lock(&t->lock);
4504 if (t->to_thread == thread)
4505 send_reply = t;
4506 }
4507 thread->is_dead = true;
4508
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004509 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07004510 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004511 active_transactions++;
4512 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304513 "release %d:%d transaction %d %s, still active\n",
4514 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004515 t->debug_id,
4516 (t->to_thread == thread) ? "in" : "out");
4517
4518 if (t->to_thread == thread) {
4519 t->to_proc = NULL;
4520 t->to_thread = NULL;
4521 if (t->buffer) {
4522 t->buffer->transaction = NULL;
4523 t->buffer = NULL;
4524 }
4525 t = t->to_parent;
4526 } else if (t->from == thread) {
4527 t->from = NULL;
4528 t = t->from_parent;
4529 } else
4530 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07004531 spin_unlock(&last_t->lock);
4532 if (t)
4533 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004534 }
Martijn Coenen550c01d2018-01-05 11:27:07 +01004535
4536 /*
4537 * If this thread used poll, make sure we remove the waitqueue
4538 * from any epoll data structures holding it with POLLFREE.
4539 * waitqueue_active() is safe to use here because we're holding
4540 * the inner lock.
4541 */
4542 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4543 waitqueue_active(&thread->wait)) {
4544 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4545 }
4546
Todd Kjosb4827902017-05-25 15:52:17 -07004547 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004548
Martijn Coenen72766d72018-02-16 09:47:15 +01004549 /*
4550 * This is needed to avoid races between wake_up_poll() above and
4551 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
4552 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4553 * lock, so we can be sure it's done after calling synchronize_rcu().
4554 */
4555 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4556 synchronize_rcu();
4557
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004558 if (send_reply)
4559 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004560 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004561 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004562 return active_transactions;
4563}
4564
4565static unsigned int binder_poll(struct file *filp,
4566 struct poll_table_struct *wait)
4567{
4568 struct binder_proc *proc = filp->private_data;
4569 struct binder_thread *thread = NULL;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004570 bool wait_for_proc_work;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004571
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004572 thread = binder_get_thread(proc);
Greg Kroah-Hartman6e463bb2018-02-28 17:17:14 +01004573 if (!thread)
Eric Biggers4be5a282018-01-30 23:11:24 -08004574 return POLLERR;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004575
Martijn Coenen995a36e2017-06-02 13:36:52 -07004576 binder_inner_proc_lock(thread->proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004577 thread->looper |= BINDER_LOOPER_STATE_POLL;
4578 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4579
Martijn Coenen995a36e2017-06-02 13:36:52 -07004580 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004581
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004582 poll_wait(filp, &thread->wait, wait);
4583
Martijn Coenen47810932017-08-10 12:32:00 +02004584 if (binder_has_work(thread, wait_for_proc_work))
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004585 return POLLIN;
4586
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004587 return 0;
4588}
4589
Tair Rzayev78260ac2014-06-03 22:27:21 +03004590static int binder_ioctl_write_read(struct file *filp,
4591 unsigned int cmd, unsigned long arg,
4592 struct binder_thread *thread)
4593{
4594 int ret = 0;
4595 struct binder_proc *proc = filp->private_data;
4596 unsigned int size = _IOC_SIZE(cmd);
4597 void __user *ubuf = (void __user *)arg;
4598 struct binder_write_read bwr;
4599
4600 if (size != sizeof(struct binder_write_read)) {
4601 ret = -EINVAL;
4602 goto out;
4603 }
4604 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4605 ret = -EFAULT;
4606 goto out;
4607 }
4608 binder_debug(BINDER_DEBUG_READ_WRITE,
4609 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4610 proc->pid, thread->pid,
4611 (u64)bwr.write_size, (u64)bwr.write_buffer,
4612 (u64)bwr.read_size, (u64)bwr.read_buffer);
4613
4614 if (bwr.write_size > 0) {
4615 ret = binder_thread_write(proc, thread,
4616 bwr.write_buffer,
4617 bwr.write_size,
4618 &bwr.write_consumed);
4619 trace_binder_write_done(ret);
4620 if (ret < 0) {
4621 bwr.read_consumed = 0;
4622 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4623 ret = -EFAULT;
4624 goto out;
4625 }
4626 }
4627 if (bwr.read_size > 0) {
4628 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4629 bwr.read_size,
4630 &bwr.read_consumed,
4631 filp->f_flags & O_NONBLOCK);
4632 trace_binder_read_done(ret);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004633 binder_inner_proc_lock(proc);
4634 if (!binder_worklist_empty_ilocked(&proc->todo))
Martijn Coenen053be422017-06-06 15:17:46 -07004635 binder_wakeup_proc_ilocked(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004636 binder_inner_proc_unlock(proc);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004637 if (ret < 0) {
4638 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4639 ret = -EFAULT;
4640 goto out;
4641 }
4642 }
4643 binder_debug(BINDER_DEBUG_READ_WRITE,
4644 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4645 proc->pid, thread->pid,
4646 (u64)bwr.write_consumed, (u64)bwr.write_size,
4647 (u64)bwr.read_consumed, (u64)bwr.read_size);
4648 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4649 ret = -EFAULT;
4650 goto out;
4651 }
4652out:
4653 return ret;
4654}
4655
4656static int binder_ioctl_set_ctx_mgr(struct file *filp)
4657{
4658 int ret = 0;
4659 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004660 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004661 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004662 kuid_t curr_euid = current_euid();
4663
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004664 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004665 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004666 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4667 ret = -EBUSY;
4668 goto out;
4669 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004670 ret = security_binder_set_context_mgr(proc->tsk);
4671 if (ret < 0)
4672 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004673 if (uid_valid(context->binder_context_mgr_uid)) {
4674 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004675 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4676 from_kuid(&init_user_ns, curr_euid),
4677 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004678 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004679 ret = -EPERM;
4680 goto out;
4681 }
4682 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004683 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004684 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004685 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004686 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004687 ret = -ENOMEM;
4688 goto out;
4689 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004690 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004691 new_node->local_weak_refs++;
4692 new_node->local_strong_refs++;
4693 new_node->has_strong_ref = 1;
4694 new_node->has_weak_ref = 1;
4695 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004696 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004697 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004698out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004699 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004700 return ret;
4701}
4702
Colin Cross833babb32017-06-20 13:54:44 -07004703static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4704 struct binder_node_debug_info *info) {
4705 struct rb_node *n;
4706 binder_uintptr_t ptr = info->ptr;
4707
4708 memset(info, 0, sizeof(*info));
4709
4710 binder_inner_proc_lock(proc);
4711 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4712 struct binder_node *node = rb_entry(n, struct binder_node,
4713 rb_node);
4714 if (node->ptr > ptr) {
4715 info->ptr = node->ptr;
4716 info->cookie = node->cookie;
4717 info->has_strong_ref = node->has_strong_ref;
4718 info->has_weak_ref = node->has_weak_ref;
4719 break;
4720 }
4721 }
4722 binder_inner_proc_unlock(proc);
4723
4724 return 0;
4725}
4726
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004727static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4728{
4729 int ret;
4730 struct binder_proc *proc = filp->private_data;
4731 struct binder_thread *thread;
4732 unsigned int size = _IOC_SIZE(cmd);
4733 void __user *ubuf = (void __user *)arg;
4734
Tair Rzayev78260ac2014-06-03 22:27:21 +03004735 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4736 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004737
Sherry Yang435416b2017-06-22 14:37:45 -07004738 binder_selftest_alloc(&proc->alloc);
4739
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004740 trace_binder_ioctl(cmd, arg);
4741
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004742 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4743 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004744 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004745
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004746 thread = binder_get_thread(proc);
4747 if (thread == NULL) {
4748 ret = -ENOMEM;
4749 goto err;
4750 }
4751
4752 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004753 case BINDER_WRITE_READ:
4754 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4755 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004756 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004757 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004758 case BINDER_SET_MAX_THREADS: {
4759 int max_threads;
4760
4761 if (copy_from_user(&max_threads, ubuf,
4762 sizeof(max_threads))) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004763 ret = -EINVAL;
4764 goto err;
4765 }
Todd Kjosd600e902017-05-25 17:35:02 -07004766 binder_inner_proc_lock(proc);
4767 proc->max_threads = max_threads;
4768 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004769 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004770 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004771 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004772 ret = binder_ioctl_set_ctx_mgr(filp);
4773 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004774 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004775 break;
4776 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304777 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004778 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004779 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004780 thread = NULL;
4781 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004782 case BINDER_VERSION: {
4783 struct binder_version __user *ver = ubuf;
4784
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004785 if (size != sizeof(struct binder_version)) {
4786 ret = -EINVAL;
4787 goto err;
4788 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004789 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4790 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004791 ret = -EINVAL;
4792 goto err;
4793 }
4794 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004795 }
Colin Cross833babb32017-06-20 13:54:44 -07004796 case BINDER_GET_NODE_DEBUG_INFO: {
4797 struct binder_node_debug_info info;
4798
4799 if (copy_from_user(&info, ubuf, sizeof(info))) {
4800 ret = -EFAULT;
4801 goto err;
4802 }
4803
4804 ret = binder_ioctl_get_node_debug_info(proc, &info);
4805 if (ret < 0)
4806 goto err;
4807
4808 if (copy_to_user(ubuf, &info, sizeof(info))) {
4809 ret = -EFAULT;
4810 goto err;
4811 }
4812 break;
4813 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004814 default:
4815 ret = -EINVAL;
4816 goto err;
4817 }
4818 ret = 0;
4819err:
4820 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004821 thread->looper_need_return = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004822 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4823 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304824 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004825err_unlocked:
4826 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004827 return ret;
4828}
4829
4830static void binder_vma_open(struct vm_area_struct *vma)
4831{
4832 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004833
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004834 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304835 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004836 proc->pid, vma->vm_start, vma->vm_end,
4837 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4838 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004839}
4840
4841static void binder_vma_close(struct vm_area_struct *vma)
4842{
4843 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004844
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004845 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304846 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004847 proc->pid, vma->vm_start, vma->vm_end,
4848 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4849 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004850 binder_alloc_vma_close(&proc->alloc);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02004851 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004852}
4853
Vinayak Menonddac7d52014-06-02 18:17:59 +05304854static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4855{
4856 return VM_FAULT_SIGBUS;
4857}
4858
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004859static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004860 .open = binder_vma_open,
4861 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304862 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004863};
4864
Todd Kjosd325d372016-10-10 10:40:53 -07004865static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4866{
4867 int ret;
4868 struct binder_proc *proc = filp->private_data;
4869 const char *failure_string;
4870
4871 if (proc->tsk != current->group_leader)
4872 return -EINVAL;
4873
4874 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4875 vma->vm_end = vma->vm_start + SZ_4M;
4876
4877 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4878 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4879 __func__, proc->pid, vma->vm_start, vma->vm_end,
4880 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4881 (unsigned long)pgprot_val(vma->vm_page_prot));
4882
4883 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4884 ret = -EPERM;
4885 failure_string = "bad vm_flags";
4886 goto err_bad_arg;
4887 }
Minchan Kim2cafd5b2018-05-07 23:15:37 +09004888 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4889 vma->vm_flags &= ~VM_MAYWRITE;
4890
Todd Kjosd325d372016-10-10 10:40:53 -07004891 vma->vm_ops = &binder_vm_ops;
4892 vma->vm_private_data = proc;
4893
4894 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02004895 if (ret)
4896 return ret;
4897 proc->files = get_files_struct(current);
4898 return 0;
Todd Kjosd325d372016-10-10 10:40:53 -07004899
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004900err_bad_arg:
Elad Wexler6b646402017-12-29 11:03:37 +02004901 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004902 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4903 return ret;
4904}
4905
4906static int binder_open(struct inode *nodp, struct file *filp)
4907{
4908 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004909 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004910
Elad Wexler6b646402017-12-29 11:03:37 +02004911 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004912 current->group_leader->pid, current->pid);
4913
4914 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4915 if (proc == NULL)
4916 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004917 spin_lock_init(&proc->inner_lock);
4918 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004919 get_task_struct(current->group_leader);
4920 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004921 INIT_LIST_HEAD(&proc->todo);
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004922 if (binder_supported_policy(current->policy)) {
4923 proc->default_priority.sched_policy = current->policy;
4924 proc->default_priority.prio = current->normal_prio;
4925 } else {
4926 proc->default_priority.sched_policy = SCHED_NORMAL;
4927 proc->default_priority.prio = NICE_TO_PRIO(0);
4928 }
4929
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004930 binder_dev = container_of(filp->private_data, struct binder_device,
4931 miscdev);
4932 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07004933 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004934
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004935 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004936 proc->pid = current->group_leader->pid;
4937 INIT_LIST_HEAD(&proc->delivered_death);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004938 INIT_LIST_HEAD(&proc->waiting_threads);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004939 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004940
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004941 mutex_lock(&binder_procs_lock);
4942 hlist_add_head(&proc->proc_node, &binder_procs);
4943 mutex_unlock(&binder_procs_lock);
4944
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004945 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004946 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09004947
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004948 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004949 /*
4950 * proc debug entries are shared between contexts, so
4951 * this will fail if the process tries to open the driver
4952 * again with a different context. The priting code will
4953 * anyway print all contexts that a given PID has, so this
4954 * is not a problem.
4955 */
Harsh Shandilya174562a2017-12-22 19:37:02 +05304956 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004957 binder_debugfs_dir_entry_proc,
4958 (void *)(unsigned long)proc->pid,
4959 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004960 }
4961
4962 return 0;
4963}
4964
4965static int binder_flush(struct file *filp, fl_owner_t id)
4966{
4967 struct binder_proc *proc = filp->private_data;
4968
4969 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4970
4971 return 0;
4972}
4973
4974static void binder_deferred_flush(struct binder_proc *proc)
4975{
4976 struct rb_node *n;
4977 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09004978
Todd Kjosb4827902017-05-25 15:52:17 -07004979 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004980 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4981 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09004982
Todd Kjos6798e6d2017-01-06 14:19:25 -08004983 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004984 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4985 wake_up_interruptible(&thread->wait);
4986 wake_count++;
4987 }
4988 }
Todd Kjosb4827902017-05-25 15:52:17 -07004989 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004990
4991 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4992 "binder_flush: %d woke %d threads\n", proc->pid,
4993 wake_count);
4994}
4995
4996static int binder_release(struct inode *nodp, struct file *filp)
4997{
4998 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004999
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005000 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005001 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5002
5003 return 0;
5004}
5005
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005006static int binder_node_release(struct binder_node *node, int refs)
5007{
5008 struct binder_ref *ref;
5009 int death = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07005010 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005011
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005012 binder_release_work(proc, &node->async_todo);
Todd Kjose7f23ed2017-03-21 13:06:01 -07005013
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005014 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07005015 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005016 binder_dequeue_work_ilocked(&node->work);
Todd Kjosf22abc72017-05-09 11:08:05 -07005017 /*
5018 * The caller must have taken a temporary ref on the node,
5019 */
5020 BUG_ON(!node->tmp_refs);
5021 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07005022 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005023 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07005024 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005025
5026 return refs;
5027 }
5028
5029 node->proc = NULL;
5030 node->local_strong_refs = 0;
5031 node->local_weak_refs = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07005032 binder_inner_proc_unlock(proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005033
5034 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005035 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005036 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005037
5038 hlist_for_each_entry(ref, &node->refs, node_entry) {
5039 refs++;
Martijn Coenenf9eac642017-05-22 11:26:23 -07005040 /*
5041 * Need the node lock to synchronize
5042 * with new notification requests and the
5043 * inner lock to synchronize with queued
5044 * death notifications.
5045 */
5046 binder_inner_proc_lock(ref->proc);
5047 if (!ref->death) {
5048 binder_inner_proc_unlock(ref->proc);
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08005049 continue;
Martijn Coenenf9eac642017-05-22 11:26:23 -07005050 }
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005051
5052 death++;
5053
Martijn Coenenf9eac642017-05-22 11:26:23 -07005054 BUG_ON(!list_empty(&ref->death->work.entry));
5055 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5056 binder_enqueue_work_ilocked(&ref->death->work,
5057 &ref->proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07005058 binder_wakeup_proc_ilocked(ref->proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005059 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005060 }
5061
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005062 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5063 "node %d now dead, refs %d, death %d\n",
5064 node->debug_id, refs, death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005065 binder_node_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07005066 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005067
5068 return refs;
5069}
5070
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005071static void binder_deferred_release(struct binder_proc *proc)
5072{
Martijn Coenen0b3311e2016-09-30 15:51:48 +02005073 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005074 struct rb_node *n;
Todd Kjosd325d372016-10-10 10:40:53 -07005075 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005076
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005077 BUG_ON(proc->files);
5078
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005079 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005080 hlist_del(&proc->proc_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005081 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005082
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005083 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02005084 if (context->binder_context_mgr_node &&
5085 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005086 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01005087 "%s: %d context_mgr_node gone\n",
5088 __func__, proc->pid);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02005089 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005090 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005091 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjosb4827902017-05-25 15:52:17 -07005092 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07005093 /*
5094 * Make sure proc stays alive after we
5095 * remove all the threads
5096 */
5097 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005098
Todd Kjos2f993e22017-05-12 14:42:55 -07005099 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005100 threads = 0;
5101 active_transactions = 0;
5102 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005103 struct binder_thread *thread;
5104
5105 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjosb4827902017-05-25 15:52:17 -07005106 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005107 threads++;
Todd Kjos2f993e22017-05-12 14:42:55 -07005108 active_transactions += binder_thread_release(proc, thread);
Todd Kjosb4827902017-05-25 15:52:17 -07005109 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005110 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005111
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005112 nodes = 0;
5113 incoming_refs = 0;
5114 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005115 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005116
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005117 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005118 nodes++;
Todd Kjosf22abc72017-05-09 11:08:05 -07005119 /*
5120 * take a temporary ref on the node before
5121 * calling binder_node_release() which will either
5122 * kfree() the node or call binder_put_node()
5123 */
Todd Kjos425d23f2017-06-12 12:07:26 -07005124 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005125 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjos425d23f2017-06-12 12:07:26 -07005126 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005127 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjos425d23f2017-06-12 12:07:26 -07005128 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005129 }
Todd Kjos425d23f2017-06-12 12:07:26 -07005130 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005131
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005132 outgoing_refs = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07005133 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005134 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005135 struct binder_ref *ref;
5136
5137 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005138 outgoing_refs++;
Todd Kjos5346bf32016-10-20 16:43:34 -07005139 binder_cleanup_ref_olocked(ref);
5140 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07005141 binder_free_ref(ref);
Todd Kjos5346bf32016-10-20 16:43:34 -07005142 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005143 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005144 binder_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005145
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005146 binder_release_work(proc, &proc->todo);
5147 binder_release_work(proc, &proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005148
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005149 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjosd325d372016-10-10 10:40:53 -07005150 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01005151 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjosd325d372016-10-10 10:40:53 -07005152 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005153
Todd Kjos2f993e22017-05-12 14:42:55 -07005154 binder_proc_dec_tmpref(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005155}
5156
5157static void binder_deferred_func(struct work_struct *work)
5158{
5159 struct binder_proc *proc;
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005160 struct files_struct *files;
5161
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005162 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09005163
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005164 do {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005165 mutex_lock(&binder_deferred_lock);
5166 if (!hlist_empty(&binder_deferred_list)) {
5167 proc = hlist_entry(binder_deferred_list.first,
5168 struct binder_proc, deferred_work_node);
5169 hlist_del_init(&proc->deferred_work_node);
5170 defer = proc->deferred_work;
5171 proc->deferred_work = 0;
5172 } else {
5173 proc = NULL;
5174 defer = 0;
5175 }
5176 mutex_unlock(&binder_deferred_lock);
5177
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005178 files = NULL;
5179 if (defer & BINDER_DEFERRED_PUT_FILES) {
5180 files = proc->files;
5181 if (files)
5182 proc->files = NULL;
5183 }
5184
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005185 if (defer & BINDER_DEFERRED_FLUSH)
5186 binder_deferred_flush(proc);
5187
5188 if (defer & BINDER_DEFERRED_RELEASE)
5189 binder_deferred_release(proc); /* frees proc */
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005190
5191 if (files)
5192 put_files_struct(files);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005193 } while (proc);
5194}
5195static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5196
5197static void
5198binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5199{
5200 mutex_lock(&binder_deferred_lock);
5201 proc->deferred_work |= defer;
5202 if (hlist_unhashed(&proc->deferred_work_node)) {
5203 hlist_add_head(&proc->deferred_work_node,
5204 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05305205 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005206 }
5207 mutex_unlock(&binder_deferred_lock);
5208}
5209
Todd Kjos6d241a42017-04-21 14:32:11 -07005210static void print_binder_transaction_ilocked(struct seq_file *m,
5211 struct binder_proc *proc,
5212 const char *prefix,
5213 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005214{
Todd Kjos6d241a42017-04-21 14:32:11 -07005215 struct binder_proc *to_proc;
5216 struct binder_buffer *buffer = t->buffer;
5217
Todd Kjos2f993e22017-05-12 14:42:55 -07005218 spin_lock(&t->lock);
Todd Kjos6d241a42017-04-21 14:32:11 -07005219 to_proc = t->to_proc;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005220 seq_printf(m,
Martijn Coenen57b2ac62017-06-06 17:04:42 -07005221 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005222 prefix, t->debug_id, t,
5223 t->from ? t->from->proc->pid : 0,
5224 t->from ? t->from->pid : 0,
Todd Kjos6d241a42017-04-21 14:32:11 -07005225 to_proc ? to_proc->pid : 0,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005226 t->to_thread ? t->to_thread->pid : 0,
Martijn Coenen57b2ac62017-06-06 17:04:42 -07005227 t->code, t->flags, t->priority.sched_policy,
5228 t->priority.prio, t->need_reply);
Todd Kjos2f993e22017-05-12 14:42:55 -07005229 spin_unlock(&t->lock);
5230
Todd Kjos6d241a42017-04-21 14:32:11 -07005231 if (proc != to_proc) {
5232 /*
5233 * Can only safely deref buffer if we are holding the
5234 * correct proc inner lock for this node
5235 */
5236 seq_puts(m, "\n");
5237 return;
5238 }
5239
5240 if (buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005241 seq_puts(m, " buffer free\n");
5242 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005243 }
Todd Kjos6d241a42017-04-21 14:32:11 -07005244 if (buffer->target_node)
5245 seq_printf(m, " node %d", buffer->target_node->debug_id);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005246 seq_printf(m, " size %zd:%zd data %p\n",
Todd Kjos6d241a42017-04-21 14:32:11 -07005247 buffer->data_size, buffer->offsets_size,
5248 buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005249}
5250
Todd Kjos6d241a42017-04-21 14:32:11 -07005251static void print_binder_work_ilocked(struct seq_file *m,
5252 struct binder_proc *proc,
5253 const char *prefix,
5254 const char *transaction_prefix,
5255 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005256{
5257 struct binder_node *node;
5258 struct binder_transaction *t;
5259
5260 switch (w->type) {
5261 case BINDER_WORK_TRANSACTION:
5262 t = container_of(w, struct binder_transaction, work);
Todd Kjos6d241a42017-04-21 14:32:11 -07005263 print_binder_transaction_ilocked(
5264 m, proc, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005265 break;
Todd Kjos858b8da2017-04-21 17:35:12 -07005266 case BINDER_WORK_RETURN_ERROR: {
5267 struct binder_error *e = container_of(
5268 w, struct binder_error, work);
5269
5270 seq_printf(m, "%stransaction error: %u\n",
5271 prefix, e->cmd);
5272 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005273 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005274 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005275 break;
5276 case BINDER_WORK_NODE:
5277 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08005278 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5279 prefix, node->debug_id,
5280 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005281 break;
5282 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005283 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005284 break;
5285 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005286 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005287 break;
5288 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005289 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005290 break;
5291 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005292 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005293 break;
5294 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005295}
5296
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005297static void print_binder_thread_ilocked(struct seq_file *m,
5298 struct binder_thread *thread,
5299 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005300{
5301 struct binder_transaction *t;
5302 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005303 size_t start_pos = m->count;
5304 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005305
Todd Kjos2f993e22017-05-12 14:42:55 -07005306 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
Todd Kjos6798e6d2017-01-06 14:19:25 -08005307 thread->pid, thread->looper,
Todd Kjos2f993e22017-05-12 14:42:55 -07005308 thread->looper_need_return,
5309 atomic_read(&thread->tmp_ref));
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005310 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005311 t = thread->transaction_stack;
5312 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005313 if (t->from == thread) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005314 print_binder_transaction_ilocked(m, thread->proc,
5315 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005316 t = t->from_parent;
5317 } else if (t->to_thread == thread) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005318 print_binder_transaction_ilocked(m, thread->proc,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005319 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005320 t = t->to_parent;
5321 } else {
Todd Kjos6d241a42017-04-21 14:32:11 -07005322 print_binder_transaction_ilocked(m, thread->proc,
5323 " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005324 t = NULL;
5325 }
5326 }
5327 list_for_each_entry(w, &thread->todo, entry) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005328 print_binder_work_ilocked(m, thread->proc, " ",
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005329 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005330 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005331 if (!print_always && m->count == header_pos)
5332 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005333}
5334
Todd Kjos425d23f2017-06-12 12:07:26 -07005335static void print_binder_node_nilocked(struct seq_file *m,
5336 struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005337{
5338 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005339 struct binder_work *w;
5340 int count;
5341
5342 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08005343 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005344 count++;
5345
Martijn Coenen6aac9792017-06-07 09:29:14 -07005346 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
Arve Hjønnevågda498892014-02-21 14:40:26 -08005347 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Martijn Coenen6aac9792017-06-07 09:29:14 -07005348 node->sched_policy, node->min_priority,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005349 node->has_strong_ref, node->has_weak_ref,
5350 node->local_strong_refs, node->local_weak_refs,
Todd Kjosf22abc72017-05-09 11:08:05 -07005351 node->internal_strong_refs, count, node->tmp_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005352 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005353 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08005354 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005355 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005356 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005357 seq_puts(m, "\n");
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005358 if (node->proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005359 list_for_each_entry(w, &node->async_todo, entry)
Todd Kjos6d241a42017-04-21 14:32:11 -07005360 print_binder_work_ilocked(m, node->proc, " ",
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005361 " pending async transaction", w);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005362 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005363}
5364
Todd Kjos5346bf32016-10-20 16:43:34 -07005365static void print_binder_ref_olocked(struct seq_file *m,
5366 struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005367{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005368 binder_node_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07005369 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5370 ref->data.debug_id, ref->data.desc,
5371 ref->node->proc ? "" : "dead ",
5372 ref->node->debug_id, ref->data.strong,
5373 ref->data.weak, ref->death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005374 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005375}
5376
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005377static void print_binder_proc(struct seq_file *m,
5378 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005379{
5380 struct binder_work *w;
5381 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005382 size_t start_pos = m->count;
5383 size_t header_pos;
Todd Kjos425d23f2017-06-12 12:07:26 -07005384 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005385
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005386 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005387 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005388 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005389
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005390 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005391 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005392 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005393 rb_node), print_all);
Todd Kjos425d23f2017-06-12 12:07:26 -07005394
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005395 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005396 struct binder_node *node = rb_entry(n, struct binder_node,
5397 rb_node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005398 /*
5399 * take a temporary reference on the node so it
5400 * survives and isn't removed from the tree
5401 * while we print it.
5402 */
5403 binder_inc_node_tmpref_ilocked(node);
5404 /* Need to drop inner lock to take node lock */
5405 binder_inner_proc_unlock(proc);
5406 if (last_node)
5407 binder_put_node(last_node);
5408 binder_node_inner_lock(node);
5409 print_binder_node_nilocked(m, node);
5410 binder_node_inner_unlock(node);
5411 last_node = node;
5412 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005413 }
Todd Kjos425d23f2017-06-12 12:07:26 -07005414 binder_inner_proc_unlock(proc);
5415 if (last_node)
5416 binder_put_node(last_node);
5417
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005418 if (print_all) {
Todd Kjos5346bf32016-10-20 16:43:34 -07005419 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005420 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005421 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005422 n = rb_next(n))
Todd Kjos5346bf32016-10-20 16:43:34 -07005423 print_binder_ref_olocked(m, rb_entry(n,
5424 struct binder_ref,
5425 rb_node_desc));
5426 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005427 }
Todd Kjosd325d372016-10-10 10:40:53 -07005428 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005429 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005430 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos6d241a42017-04-21 14:32:11 -07005431 print_binder_work_ilocked(m, proc, " ",
5432 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005433 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005434 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005435 break;
5436 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005437 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005438 if (!print_all && m->count == header_pos)
5439 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005440}
5441
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005442static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005443 "BR_ERROR",
5444 "BR_OK",
5445 "BR_TRANSACTION",
5446 "BR_REPLY",
5447 "BR_ACQUIRE_RESULT",
5448 "BR_DEAD_REPLY",
5449 "BR_TRANSACTION_COMPLETE",
5450 "BR_INCREFS",
5451 "BR_ACQUIRE",
5452 "BR_RELEASE",
5453 "BR_DECREFS",
5454 "BR_ATTEMPT_ACQUIRE",
5455 "BR_NOOP",
5456 "BR_SPAWN_LOOPER",
5457 "BR_FINISHED",
5458 "BR_DEAD_BINDER",
5459 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5460 "BR_FAILED_REPLY"
5461};
5462
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005463static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005464 "BC_TRANSACTION",
5465 "BC_REPLY",
5466 "BC_ACQUIRE_RESULT",
5467 "BC_FREE_BUFFER",
5468 "BC_INCREFS",
5469 "BC_ACQUIRE",
5470 "BC_RELEASE",
5471 "BC_DECREFS",
5472 "BC_INCREFS_DONE",
5473 "BC_ACQUIRE_DONE",
5474 "BC_ATTEMPT_ACQUIRE",
5475 "BC_REGISTER_LOOPER",
5476 "BC_ENTER_LOOPER",
5477 "BC_EXIT_LOOPER",
5478 "BC_REQUEST_DEATH_NOTIFICATION",
5479 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02005480 "BC_DEAD_BINDER_DONE",
5481 "BC_TRANSACTION_SG",
5482 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005483};
5484
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005485static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005486 "proc",
5487 "thread",
5488 "node",
5489 "ref",
5490 "death",
5491 "transaction",
5492 "transaction_complete"
5493};
5494
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005495static void print_binder_stats(struct seq_file *m, const char *prefix,
5496 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005497{
5498 int i;
5499
5500 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005501 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005502 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005503 int temp = atomic_read(&stats->bc[i]);
5504
5505 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005506 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005507 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005508 }
5509
5510 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005511 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005512 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005513 int temp = atomic_read(&stats->br[i]);
5514
5515 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005516 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005517 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005518 }
5519
5520 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005521 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005522 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005523 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005524 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005525 int created = atomic_read(&stats->obj_created[i]);
5526 int deleted = atomic_read(&stats->obj_deleted[i]);
5527
5528 if (created || deleted)
5529 seq_printf(m, "%s%s: active %d total %d\n",
5530 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005531 binder_objstat_strings[i],
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005532 created - deleted,
5533 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005534 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005535}
5536
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005537static void print_binder_proc_stats(struct seq_file *m,
5538 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005539{
5540 struct binder_work *w;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005541 struct binder_thread *thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005542 struct rb_node *n;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005543 int count, strong, weak, ready_threads;
Todd Kjosb4827902017-05-25 15:52:17 -07005544 size_t free_async_space =
5545 binder_alloc_get_free_async_space(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005546
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005547 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005548 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005549 count = 0;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005550 ready_threads = 0;
Todd Kjosb4827902017-05-25 15:52:17 -07005551 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005552 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5553 count++;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005554
5555 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5556 ready_threads++;
5557
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005558 seq_printf(m, " threads: %d\n", count);
5559 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005560 " ready threads %d\n"
5561 " free async space %zd\n", proc->requested_threads,
5562 proc->requested_threads_started, proc->max_threads,
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005563 ready_threads,
Todd Kjosb4827902017-05-25 15:52:17 -07005564 free_async_space);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005565 count = 0;
5566 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5567 count++;
Todd Kjos425d23f2017-06-12 12:07:26 -07005568 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005569 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005570 count = 0;
5571 strong = 0;
5572 weak = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07005573 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005574 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5575 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5576 rb_node_desc);
5577 count++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07005578 strong += ref->data.strong;
5579 weak += ref->data.weak;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005580 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005581 binder_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005582 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005583
Todd Kjosd325d372016-10-10 10:40:53 -07005584 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005585 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005586
Sherry Yang91004422017-08-22 17:26:57 -07005587 binder_alloc_print_pages(m, &proc->alloc);
5588
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005589 count = 0;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005590 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005591 list_for_each_entry(w, &proc->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005592 if (w->type == BINDER_WORK_TRANSACTION)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005593 count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005594 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005595 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005596 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005597
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005598 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005599}
5600
5601
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005602static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005603{
5604 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005605 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005606 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005607
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005608 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005609
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005610 spin_lock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005611 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005612 seq_puts(m, "dead nodes:\n");
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005613 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5614 /*
5615 * take a temporary reference on the node so it
5616 * survives and isn't removed from the list
5617 * while we print it.
5618 */
5619 node->tmp_refs++;
5620 spin_unlock(&binder_dead_nodes_lock);
5621 if (last_node)
5622 binder_put_node(last_node);
5623 binder_node_lock(node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005624 print_binder_node_nilocked(m, node);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005625 binder_node_unlock(node);
5626 last_node = node;
5627 spin_lock(&binder_dead_nodes_lock);
5628 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005629 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005630 if (last_node)
5631 binder_put_node(last_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005632
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005633 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005634 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005635 print_binder_proc(m, proc, 1);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005636 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005637
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005638 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005639}
5640
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005641static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005642{
5643 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005644
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005645 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005646
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005647 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005648
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005649 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005650 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005651 print_binder_proc_stats(m, proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005652 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005653
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005654 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005655}
5656
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005657static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005658{
5659 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005660
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005661 seq_puts(m, "binder transactions:\n");
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005662 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005663 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005664 print_binder_proc(m, proc, 0);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005665 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005666
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005667 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005668}
5669
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005670static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005671{
Riley Andrews83050a42016-02-09 21:05:33 -08005672 struct binder_proc *itr;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005673 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005674
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005675 mutex_lock(&binder_procs_lock);
Riley Andrews83050a42016-02-09 21:05:33 -08005676 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005677 if (itr->pid == pid) {
5678 seq_puts(m, "binder proc state:\n");
5679 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08005680 }
5681 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005682 mutex_unlock(&binder_procs_lock);
5683
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005684 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005685}
5686
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005687static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005688 struct binder_transaction_log_entry *e)
5689{
Todd Kjos1cfe6272017-05-24 13:33:28 -07005690 int debug_id = READ_ONCE(e->debug_id_done);
5691 /*
5692 * read barrier to guarantee debug_id_done read before
5693 * we print the log values
5694 */
5695 smp_rmb();
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005696 seq_printf(m,
Todd Kjos1cfe6272017-05-24 13:33:28 -07005697 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005698 e->debug_id, (e->call_type == 2) ? "reply" :
5699 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005700 e->from_thread, e->to_proc, e->to_thread, e->context_name,
Todd Kjose598d172017-03-22 17:19:52 -07005701 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5702 e->return_error, e->return_error_param,
5703 e->return_error_line);
Todd Kjos1cfe6272017-05-24 13:33:28 -07005704 /*
5705 * read-barrier to guarantee read of debug_id_done after
5706 * done printing the fields of the entry
5707 */
5708 smp_rmb();
5709 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5710 "\n" : " (incomplete)\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005711}
5712
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005713static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005714{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005715 struct binder_transaction_log *log = m->private;
Todd Kjos1cfe6272017-05-24 13:33:28 -07005716 unsigned int log_cur = atomic_read(&log->cur);
5717 unsigned int count;
5718 unsigned int cur;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005719 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005720
Todd Kjos1cfe6272017-05-24 13:33:28 -07005721 count = log_cur + 1;
5722 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5723 0 : count % ARRAY_SIZE(log->entry);
5724 if (count > ARRAY_SIZE(log->entry) || log->full)
5725 count = ARRAY_SIZE(log->entry);
5726 for (i = 0; i < count; i++) {
5727 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5728
5729 print_binder_transaction_log_entry(m, &log->entry[index]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005730 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005731 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005732}
5733
5734static const struct file_operations binder_fops = {
5735 .owner = THIS_MODULE,
5736 .poll = binder_poll,
5737 .unlocked_ioctl = binder_ioctl,
Arve Hjønnevågda498892014-02-21 14:40:26 -08005738 .compat_ioctl = binder_ioctl,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005739 .mmap = binder_mmap,
5740 .open = binder_open,
5741 .flush = binder_flush,
5742 .release = binder_release,
5743};
5744
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005745BINDER_DEBUG_ENTRY(state);
5746BINDER_DEBUG_ENTRY(stats);
5747BINDER_DEBUG_ENTRY(transactions);
5748BINDER_DEBUG_ENTRY(transaction_log);
5749
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005750static int __init init_binder_device(const char *name)
5751{
5752 int ret;
5753 struct binder_device *binder_device;
5754
5755 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5756 if (!binder_device)
5757 return -ENOMEM;
5758
5759 binder_device->miscdev.fops = &binder_fops;
5760 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5761 binder_device->miscdev.name = name;
5762
5763 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5764 binder_device->context.name = name;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005765 mutex_init(&binder_device->context.context_mgr_node_lock);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005766
5767 ret = misc_register(&binder_device->miscdev);
5768 if (ret < 0) {
5769 kfree(binder_device);
5770 return ret;
5771 }
5772
5773 hlist_add_head(&binder_device->hlist, &binder_devices);
5774
5775 return ret;
5776}
5777
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005778static int __init binder_init(void)
5779{
5780 int ret;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005781 char *device_name, *device_names;
5782 struct binder_device *device;
5783 struct hlist_node *tmp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005784
Tetsuo Handaf8cb8222017-11-29 22:29:47 +09005785 ret = binder_alloc_shrinker_init();
5786 if (ret)
5787 return ret;
Sherry Yang5828d702017-07-29 13:24:11 -07005788
Todd Kjos1cfe6272017-05-24 13:33:28 -07005789 atomic_set(&binder_transaction_log.cur, ~0U);
5790 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5791
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005792 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5793 if (binder_debugfs_dir_entry_root)
5794 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5795 binder_debugfs_dir_entry_root);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005796
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005797 if (binder_debugfs_dir_entry_root) {
5798 debugfs_create_file("state",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305799 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005800 binder_debugfs_dir_entry_root,
5801 NULL,
5802 &binder_state_fops);
5803 debugfs_create_file("stats",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305804 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005805 binder_debugfs_dir_entry_root,
5806 NULL,
5807 &binder_stats_fops);
5808 debugfs_create_file("transactions",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305809 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005810 binder_debugfs_dir_entry_root,
5811 NULL,
5812 &binder_transactions_fops);
5813 debugfs_create_file("transaction_log",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305814 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005815 binder_debugfs_dir_entry_root,
5816 &binder_transaction_log,
5817 &binder_transaction_log_fops);
5818 debugfs_create_file("failed_transaction_log",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305819 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005820 binder_debugfs_dir_entry_root,
5821 &binder_transaction_log_failed,
5822 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005823 }
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005824
5825 /*
5826 * Copy the module_parameter string, because we don't want to
5827 * tokenize it in-place.
5828 */
5829 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5830 if (!device_names) {
5831 ret = -ENOMEM;
5832 goto err_alloc_device_names_failed;
5833 }
5834 strcpy(device_names, binder_devices_param);
5835
5836 while ((device_name = strsep(&device_names, ","))) {
5837 ret = init_binder_device(device_name);
5838 if (ret)
5839 goto err_init_binder_device_failed;
5840 }
5841
5842 return ret;
5843
5844err_init_binder_device_failed:
5845 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5846 misc_deregister(&device->miscdev);
5847 hlist_del(&device->hlist);
5848 kfree(device);
5849 }
5850err_alloc_device_names_failed:
5851 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5852
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005853 return ret;
5854}
5855
5856device_initcall(binder_init);
5857
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005858#define CREATE_TRACE_POINTS
5859#include "binder_trace.h"
5860
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005861MODULE_LICENSE("GPL v2");