Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Todd Kjosfc7a7e22017-05-29 16:44:24 -070018/*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
Martijn Coenen22d64e4322017-06-02 11:15:44 -070031 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
Todd Kjosfc7a7e22017-05-29 16:44:24 -070035 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock to be held on entry indicate the
42 * required lock in the suffix of the function name:
43 *
44 * foo_olocked() : requires node->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
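/*
 * Illustrative sketch (not driver code): given the order above, a path
 * that needed all three locks would nest the helpers defined further
 * down as follows ("proc" and "node" are hypothetical locals):
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */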
51
Anmol Sarma56b468f2012-10-30 22:35:43 +053052#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090054#include <asm/cacheflush.h>
55#include <linux/fdtable.h>
56#include <linux/file.h>
Colin Crosse2610b22013-05-06 23:50:15 +000057#include <linux/freezer.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090058#include <linux/fs.h>
59#include <linux/list.h>
60#include <linux/miscdevice.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090061#include <linux/module.h>
62#include <linux/mutex.h>
63#include <linux/nsproxy.h>
64#include <linux/poll.h>
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070065#include <linux/debugfs.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090066#include <linux/rbtree.h>
67#include <linux/sched.h>
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070068#include <linux/seq_file.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090069#include <linux/uaccess.h>
Eric W. Biederman17cf22c2010-03-02 14:51:53 -080070#include <linux/pid_namespace.h>
Stephen Smalley79af7302015-01-21 10:54:10 -050071#include <linux/security.h>
Todd Kjosfc7a7e22017-05-29 16:44:24 -070072#include <linux/spinlock.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090073
Greg Kroah-Hartman9246a4a2014-10-16 15:26:51 +020074#include <uapi/linux/android/binder.h>
Todd Kjosb9341022016-10-10 10:40:53 -070075#include "binder_alloc.h"
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -070076#include "binder_trace.h"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090077
Todd Kjos8d9f6f32016-10-17 12:33:15 -070078static HLIST_HEAD(binder_deferred_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090079static DEFINE_MUTEX(binder_deferred_lock);
80
Martijn Coenen6b7c7122016-09-30 16:08:09 +020081static HLIST_HEAD(binder_devices);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090082static HLIST_HEAD(binder_procs);
Todd Kjos8d9f6f32016-10-17 12:33:15 -070083static DEFINE_MUTEX(binder_procs_lock);
84
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090085static HLIST_HEAD(binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -070086static DEFINE_SPINLOCK(binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090087
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070088static struct dentry *binder_debugfs_dir_entry_root;
89static struct dentry *binder_debugfs_dir_entry_proc;
Todd Kjosc4bd08b2017-05-25 10:56:00 -070090static atomic_t binder_last_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090091
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070092#define BINDER_DEBUG_ENTRY(name) \
93static int binder_##name##_open(struct inode *inode, struct file *file) \
94{ \
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070095 return single_open(file, binder_##name##_show, inode->i_private); \
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070096} \
97\
98static const struct file_operations binder_##name##_fops = { \
99 .owner = THIS_MODULE, \
100 .open = binder_##name##_open, \
101 .read = seq_read, \
102 .llseek = seq_lseek, \
103 .release = single_release, \
104}
105
106static int binder_proc_show(struct seq_file *m, void *unused);
107BINDER_DEBUG_ENTRY(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900108
109/* This is only defined in include/asm-arm/sizes.h */
110#ifndef SZ_1K
111#define SZ_1K 0x400
112#endif
113
114#ifndef SZ_4M
115#define SZ_4M 0x400000
116#endif
117
118#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
119
120#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
121
122enum {
123 BINDER_DEBUG_USER_ERROR = 1U << 0,
124 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
125 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
126 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
127 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
128 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
129 BINDER_DEBUG_READ_WRITE = 1U << 6,
130 BINDER_DEBUG_USER_REFS = 1U << 7,
131 BINDER_DEBUG_THREADS = 1U << 8,
132 BINDER_DEBUG_TRANSACTION = 1U << 9,
133 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
134 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
135 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
Todd Kjosd325d372016-10-10 10:40:53 -0700136 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700137 BINDER_DEBUG_SPINLOCKS = 1U << 14,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900138};
139static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
140 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
Harsh Shandilya174562a2017-12-22 19:37:02 +0530141module_param_named(debug_mask, binder_debug_mask, uint, 0644);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900142
Martijn Coenen6b7c7122016-09-30 16:08:09 +0200143static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
144module_param_named(devices, binder_devices_param, charp, S_IRUGO);
145
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900146static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
147static int binder_stop_on_user_error;
148
149static int binder_set_stop_on_user_error(const char *val,
Kees Cook24da2c82017-10-17 19:04:42 -0700150 const struct kernel_param *kp)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900151{
152 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +0900153
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900154 ret = param_set_int(val, kp);
155 if (binder_stop_on_user_error < 2)
156 wake_up(&binder_user_error_wait);
157 return ret;
158}
159module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
Harsh Shandilya174562a2017-12-22 19:37:02 +0530160 param_get_int, &binder_stop_on_user_error, 0644);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900161
162#define binder_debug(mask, x...) \
163 do { \
164 if (binder_debug_mask & mask) \
Sherwin Soltani258767f2012-06-26 02:00:30 -0400165 pr_info(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900166 } while (0)
167
168#define binder_user_error(x...) \
169 do { \
170 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
Sherwin Soltani258767f2012-06-26 02:00:30 -0400171 pr_info(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900172 if (binder_stop_on_user_error) \
173 binder_stop_on_user_error = 2; \
174 } while (0)
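/*
 * Hypothetical call sites for the two macros above; output is emitted
 * only when the matching bit is set in binder_debug_mask:
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d looper exited\n", proc->pid, thread->pid);
 *	binder_user_error("%d:%d sent bad handle\n", proc->pid, thread->pid);
 *
 * binder_user_error() additionally latches binder_stop_on_user_error to 2
 * when the stop_on_user_error module parameter is set.
 */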
175
Martijn Coenen00c80372016-07-13 12:06:49 +0200176#define to_flat_binder_object(hdr) \
177 container_of(hdr, struct flat_binder_object, hdr)
178
179#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
180
Martijn Coenen5a6da532016-09-30 14:10:07 +0200181#define to_binder_buffer_object(hdr) \
182 container_of(hdr, struct binder_buffer_object, hdr)
183
Martijn Coenene3e0f4802016-10-18 13:58:55 +0200184#define to_binder_fd_array_object(hdr) \
185 container_of(hdr, struct binder_fd_array_object, hdr)
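/*
 * The to_*_object() helpers above recover the enclosing object from its
 * embedded header via container_of(). Hypothetical example:
 *
 *	struct binder_object_header *hdr = ...;
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fp = to_binder_fd_object(hdr);
 *		...
 *	}
 */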
186
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900187enum binder_stat_types {
188 BINDER_STAT_PROC,
189 BINDER_STAT_THREAD,
190 BINDER_STAT_NODE,
191 BINDER_STAT_REF,
192 BINDER_STAT_DEATH,
193 BINDER_STAT_TRANSACTION,
194 BINDER_STAT_TRANSACTION_COMPLETE,
195 BINDER_STAT_COUNT
196};
197
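/*
 * The br[] and bc[] arrays below are indexed by _IOC_NR() of the return
 * (BR_*) and command (BC_*) codes; BR_FAILED_REPLY and BC_REPLY_SG are
 * the highest-numbered codes of each kind, hence the array sizes.
 */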
198struct binder_stats {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -0700199 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
200 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
201 atomic_t obj_created[BINDER_STAT_COUNT];
202 atomic_t obj_deleted[BINDER_STAT_COUNT];
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900203};
204
205static struct binder_stats binder_stats;
206
207static inline void binder_stats_deleted(enum binder_stat_types type)
208{
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -0700209 atomic_inc(&binder_stats.obj_deleted[type]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900210}
211
212static inline void binder_stats_created(enum binder_stat_types type)
213{
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -0700214 atomic_inc(&binder_stats.obj_created[type]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900215}
216
217struct binder_transaction_log_entry {
218 int debug_id;
Todd Kjos1cfe6272017-05-24 13:33:28 -0700219 int debug_id_done;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900220 int call_type;
221 int from_proc;
222 int from_thread;
223 int target_handle;
224 int to_proc;
225 int to_thread;
226 int to_node;
227 int data_size;
228 int offsets_size;
Todd Kjose598d172017-03-22 17:19:52 -0700229 int return_error_line;
230 uint32_t return_error;
231 uint32_t return_error_param;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +0200232 const char *context_name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900233};
234struct binder_transaction_log {
Todd Kjos1cfe6272017-05-24 13:33:28 -0700235 atomic_t cur;
236 bool full;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900237 struct binder_transaction_log_entry entry[32];
238};
239static struct binder_transaction_log binder_transaction_log;
240static struct binder_transaction_log binder_transaction_log_failed;
241
242static struct binder_transaction_log_entry *binder_transaction_log_add(
243 struct binder_transaction_log *log)
244{
245 struct binder_transaction_log_entry *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -0700246 unsigned int cur = atomic_inc_return(&log->cur);
Seunghun Lee10f62862014-05-01 01:30:23 +0900247
Todd Kjos1cfe6272017-05-24 13:33:28 -0700248 if (cur >= ARRAY_SIZE(log->entry))
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -0600249 log->full = true;
Todd Kjos1cfe6272017-05-24 13:33:28 -0700250 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
251 WRITE_ONCE(e->debug_id_done, 0);
252 /*
253 * write-barrier to synchronize access to e->debug_id_done.
254 * We make sure the initialized 0 value is seen before
255 * the other fields are zeroed by memset() below.
256 */
257 smp_wmb();
258 memset(e, 0, sizeof(*e));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900259 return e;
260}
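/*
 * Note: the transaction log is a fixed-size ring buffer. atomic_inc_return()
 * hands out slot numbers, the index wraps via cur % ARRAY_SIZE(log->entry),
 * and once "full" is set the oldest entries are overwritten in place.
 */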
261
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200262struct binder_context {
263 struct binder_node *binder_context_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -0700264 struct mutex context_mgr_node_lock;
265
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200266 kuid_t binder_context_mgr_uid;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +0200267 const char *name;
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200268};
269
Martijn Coenen6b7c7122016-09-30 16:08:09 +0200270struct binder_device {
271 struct hlist_node hlist;
272 struct miscdevice miscdev;
273 struct binder_context context;
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200274};
275
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700276/**
277 * struct binder_work - work enqueued on a worklist
278 * @entry: node enqueued on list
279 * @type: type of work to be performed
280 *
281 * There are separate work lists for proc, thread, and node (async).
282 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900283struct binder_work {
284 struct list_head entry;
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700285
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900286 enum {
287 BINDER_WORK_TRANSACTION = 1,
288 BINDER_WORK_TRANSACTION_COMPLETE,
Todd Kjos858b8da2017-04-21 17:35:12 -0700289 BINDER_WORK_RETURN_ERROR,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900290 BINDER_WORK_NODE,
291 BINDER_WORK_DEAD_BINDER,
292 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
293 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
294 } type;
295};
296
Todd Kjos858b8da2017-04-21 17:35:12 -0700297struct binder_error {
298 struct binder_work work;
299 uint32_t cmd;
300};
301
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700302/**
303 * struct binder_node - binder node bookkeeping
304 * @debug_id: unique ID for debugging
305 * (invariant after initialized)
306 * @lock: lock for node fields
307 * @work: worklist element for node work
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700308 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700309 * @rb_node: element for proc->nodes tree
Todd Kjos425d23f2017-06-12 12:07:26 -0700310 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700311 * @dead_node: element for binder_dead_nodes list
312 * (protected by binder_dead_nodes_lock)
313 * @proc: binder_proc that owns this node
314 * (invariant after initialized)
315 * @refs: list of references on this node
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700316 * (protected by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700317 * @internal_strong_refs: used to take strong references when
318 * initiating a transaction
Todd Kjose7f23ed2017-03-21 13:06:01 -0700319 * (protected by @proc->inner_lock if @proc
320 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700321 * @local_weak_refs: weak user refs from local process
Todd Kjose7f23ed2017-03-21 13:06:01 -0700322 * (protected by @proc->inner_lock if @proc
323 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700324 * @local_strong_refs: strong user refs from local process
Todd Kjose7f23ed2017-03-21 13:06:01 -0700325 * (protected by @proc->inner_lock if @proc
326 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700327 * @tmp_refs: temporary kernel refs
Todd Kjose7f23ed2017-03-21 13:06:01 -0700328 * (protected by @proc->inner_lock while @proc
329 * is valid, and by binder_dead_nodes_lock
330 * if @proc is NULL. During inc/dec and node release
331 * it is also protected by @lock to provide safety
332 * as the node dies and @proc becomes NULL)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700333 * @ptr: userspace pointer for node
334 * (invariant, no lock needed)
335 * @cookie: userspace cookie for node
336 * (invariant, no lock needed)
337 * @has_strong_ref: userspace notified of strong ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700338 * (protected by @proc->inner_lock if @proc
339 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700340 * @pending_strong_ref: userspace has acked notification of strong ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700341 * (protected by @proc->inner_lock if @proc
342 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700343 * @has_weak_ref: userspace notified of weak ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700344 * (protected by @proc->inner_lock if @proc
345 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700346 * @pending_weak_ref: userspace has acked notification of weak ref
Todd Kjose7f23ed2017-03-21 13:06:01 -0700347 * (protected by @proc->inner_lock if @proc
348 * and by @lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700349 * @has_async_transaction: async transaction to node in progress
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700350 * (protected by @lock)
Martijn Coenen6aac9792017-06-07 09:29:14 -0700351 * @sched_policy: minimum scheduling policy for node
352 * (invariant after initialized)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700353 * @accept_fds: file descriptor operations supported for node
354 * (invariant after initialized)
355 * @min_priority: minimum scheduling priority
356 * (invariant after initialized)
Martijn Coenenc46810c2017-06-23 10:13:43 -0700357 * @inherit_rt: inherit RT scheduling policy from caller
358 * (invariant after initialized)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700359 * @async_todo: list of async work items
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700360 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700361 *
362 * Bookkeeping structure for binder nodes.
363 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900364struct binder_node {
365 int debug_id;
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700366 spinlock_t lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900367 struct binder_work work;
368 union {
369 struct rb_node rb_node;
370 struct hlist_node dead_node;
371 };
372 struct binder_proc *proc;
373 struct hlist_head refs;
374 int internal_strong_refs;
375 int local_weak_refs;
376 int local_strong_refs;
Todd Kjosf22abc72017-05-09 11:08:05 -0700377 int tmp_refs;
Arve Hjønnevågda498892014-02-21 14:40:26 -0800378 binder_uintptr_t ptr;
379 binder_uintptr_t cookie;
Todd Kjose7f23ed2017-03-21 13:06:01 -0700380 struct {
381 /*
382 * bitfield elements protected by
383 * proc inner_lock
384 */
385 u8 has_strong_ref:1;
386 u8 pending_strong_ref:1;
387 u8 has_weak_ref:1;
388 u8 pending_weak_ref:1;
389 };
390 struct {
391 /*
392 * invariant after initialization
393 */
Martijn Coenen6aac9792017-06-07 09:29:14 -0700394 u8 sched_policy:2;
Martijn Coenenc46810c2017-06-23 10:13:43 -0700395 u8 inherit_rt:1;
Todd Kjose7f23ed2017-03-21 13:06:01 -0700396 u8 accept_fds:1;
397 u8 min_priority;
398 };
399 bool has_async_transaction;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900400 struct list_head async_todo;
401};
402
403struct binder_ref_death {
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700404 /**
405 * @work: worklist element for death notifications
406 * (protected by inner_lock of the proc that
407 * this ref belongs to)
408 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900409 struct binder_work work;
Arve Hjønnevågda498892014-02-21 14:40:26 -0800410 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900411};
412
Todd Kjosb0117bb2017-05-08 09:16:27 -0700413/**
414 * struct binder_ref_data - binder_ref counts and id
415 * @debug_id: unique ID for the ref
416 * @desc: unique userspace handle for ref
417 * @strong: strong ref count (debugging only if not locked)
418 * @weak: weak ref count (debugging only if not locked)
419 *
420 * Structure to hold ref count and ref id information. Since
421 * the actual ref can only be accessed with a lock, this structure
422 * is used to return information about the ref to callers of
423 * ref inc/dec functions.
424 */
425struct binder_ref_data {
426 int debug_id;
427 uint32_t desc;
428 int strong;
429 int weak;
430};
431
432/**
433 * struct binder_ref - struct to track references on nodes
434 * @data: binder_ref_data containing id, handle, and current refcounts
435 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
436 * @rb_node_node: node for lookup by @node in proc's rb_tree
437 * @node_entry: list entry for node->refs list in target node
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700438 * (protected by @node->lock)
Todd Kjosb0117bb2017-05-08 09:16:27 -0700439 * @proc: binder_proc containing ref
440 * @node: binder_node of target node. When cleaning up a
441 * ref for deletion in binder_cleanup_ref, a non-NULL
442 * @node indicates the node must be freed
443 * @death: pointer to death notification (ref_death) if requested
Martijn Coenenf9eac642017-05-22 11:26:23 -0700444 * (protected by @node->lock)
Todd Kjosb0117bb2017-05-08 09:16:27 -0700445 *
446 * Structure to track references from procA to target node (on procB). This
447 * structure is unsafe to access without holding @proc->outer_lock.
448 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900449struct binder_ref {
450 /* Lookups needed: */
451 /* node + proc => ref (transaction) */
452 /* desc + proc => ref (transaction, inc/dec ref) */
453 /* node => refs + procs (proc exit) */
Todd Kjosb0117bb2017-05-08 09:16:27 -0700454 struct binder_ref_data data;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900455 struct rb_node rb_node_desc;
456 struct rb_node rb_node_node;
457 struct hlist_node node_entry;
458 struct binder_proc *proc;
459 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900460 struct binder_ref_death *death;
461};
462
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900463enum binder_deferred_state {
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200464 BINDER_DEFERRED_PUT_FILES = 0x01,
465 BINDER_DEFERRED_FLUSH = 0x02,
466 BINDER_DEFERRED_RELEASE = 0x04,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900467};
468
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700469/**
Martijn Coenen57b2ac62017-06-06 17:04:42 -0700470 * struct binder_priority - scheduler policy and priority
471 * @sched_policy: scheduler policy
472 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
473 *
474 * The binder driver supports inheriting the following scheduler policies:
475 * SCHED_NORMAL
476 * SCHED_BATCH
477 * SCHED_FIFO
478 * SCHED_RR
479 */
480struct binder_priority {
481 unsigned int sched_policy;
482 int prio;
483};
484
485/**
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700486 * struct binder_proc - binder process bookkeeping
487 * @proc_node: element for binder_procs list
488 * @threads: rbtree of binder_threads in this proc
Todd Kjosb4827902017-05-25 15:52:17 -0700489 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700490 * @nodes: rbtree of binder nodes associated with
491 * this proc ordered by node->ptr
Todd Kjos425d23f2017-06-12 12:07:26 -0700492 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700493 * @refs_by_desc: rbtree of refs ordered by ref->desc
Todd Kjos5346bf32016-10-20 16:43:34 -0700494 * (protected by @outer_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700495 * @refs_by_node: rbtree of refs ordered by ref->node
Todd Kjos5346bf32016-10-20 16:43:34 -0700496 * (protected by @outer_lock)
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700497 * @waiting_threads: threads currently waiting for proc work
498 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700499 * @pid PID of group_leader of process
500 * (invariant after initialized)
501 * @tsk task_struct for group_leader of process
502 * (invariant after initialized)
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200503 * @files files_struct for process
Todd Kjosfbb43392017-11-27 09:32:33 -0800504 * (protected by @files_lock)
505 * @files_lock mutex to protect @files
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700506 * @deferred_work_node: element for binder_deferred_list
507 * (protected by binder_deferred_lock)
508 * @deferred_work: bitmap of deferred work to perform
509 * (protected by binder_deferred_lock)
510 * @is_dead: process is dead and awaiting free
511 * when outstanding transactions are cleaned up
Todd Kjosb4827902017-05-25 15:52:17 -0700512 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700513 * @todo: list of work for this process
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700514 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700515 * @stats: per-process binder statistics
516 * (atomics, no lock needed)
517 * @delivered_death: list of delivered death notifications
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700518 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700519 * @max_threads: cap on number of binder threads
Todd Kjosd600e902017-05-25 17:35:02 -0700520 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700521 * @requested_threads: number of binder threads requested but not
522 * yet started. In current implementation, can
523 * only be 0 or 1.
Todd Kjosd600e902017-05-25 17:35:02 -0700524 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700525 * @requested_threads_started: number of binder threads started
Todd Kjosd600e902017-05-25 17:35:02 -0700526 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700527 * @tmp_ref: temporary reference to indicate proc is in use
Todd Kjosb4827902017-05-25 15:52:17 -0700528 * (protected by @inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700529 * @default_priority: default scheduler priority
530 * (invariant after initialized)
531 * @debugfs_entry: debugfs node
532 * @alloc: binder allocator bookkeeping
533 * @context: binder_context for this proc
534 * (invariant after initialized)
535 * @inner_lock: can nest under outer_lock and/or node lock
536 * @outer_lock: no nesting under inner or node lock
537 * Lock order: 1) outer, 2) node, 3) inner
538 *
539 * Bookkeeping structure for binder processes
540 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900541struct binder_proc {
542 struct hlist_node proc_node;
543 struct rb_root threads;
544 struct rb_root nodes;
545 struct rb_root refs_by_desc;
546 struct rb_root refs_by_node;
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700547 struct list_head waiting_threads;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900548 int pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900549 struct task_struct *tsk;
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200550 struct files_struct *files;
Todd Kjosfbb43392017-11-27 09:32:33 -0800551 struct mutex files_lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900552 struct hlist_node deferred_work_node;
553 int deferred_work;
Todd Kjos2f993e22017-05-12 14:42:55 -0700554 bool is_dead;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900555
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900556 struct list_head todo;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900557 struct binder_stats stats;
558 struct list_head delivered_death;
559 int max_threads;
560 int requested_threads;
561 int requested_threads_started;
Todd Kjos2f993e22017-05-12 14:42:55 -0700562 int tmp_ref;
Martijn Coenen57b2ac62017-06-06 17:04:42 -0700563 struct binder_priority default_priority;
Arve Hjønnevåg16b66552009-04-28 20:57:50 -0700564 struct dentry *debugfs_entry;
Todd Kjosf85d2292016-10-10 10:39:59 -0700565 struct binder_alloc alloc;
Martijn Coenen0b3311e2016-09-30 15:51:48 +0200566 struct binder_context *context;
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700567 spinlock_t inner_lock;
568 spinlock_t outer_lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900569};
570
571enum {
572 BINDER_LOOPER_STATE_REGISTERED = 0x01,
573 BINDER_LOOPER_STATE_ENTERED = 0x02,
574 BINDER_LOOPER_STATE_EXITED = 0x04,
575 BINDER_LOOPER_STATE_INVALID = 0x08,
576 BINDER_LOOPER_STATE_WAITING = 0x10,
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700577 BINDER_LOOPER_STATE_POLL = 0x20,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900578};
579
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700580/**
581 * struct binder_thread - binder thread bookkeeping
582 * @proc: binder process for this thread
583 * (invariant after initialization)
584 * @rb_node: element for proc->threads rbtree
Todd Kjosb4827902017-05-25 15:52:17 -0700585 * (protected by @proc->inner_lock)
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700586 * @waiting_thread_node: element for @proc->waiting_threads list
587 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700588 * @pid: PID for this thread
589 * (invariant after initialization)
590 * @looper: bitmap of looping state
591 * (only accessed by this thread)
592 * @looper_need_return: looping thread needs to exit driver
593 * (no lock needed)
594 * @transaction_stack: stack of in-progress transactions for this thread
Martijn Coenen995a36e2017-06-02 13:36:52 -0700595 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700596 * @todo: list of work to do for this thread
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700597 * (protected by @proc->inner_lock)
Martijn Coenen1af61802017-10-19 15:04:46 +0200598 * @process_todo: whether work in @todo should be processed
599 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700600 * @return_error: transaction errors reported by this thread
601 * (only accessed by this thread)
602 * @reply_error: transaction errors reported by target thread
Martijn Coenen995a36e2017-06-02 13:36:52 -0700603 * (protected by @proc->inner_lock)
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700604 * @wait: wait queue for thread work
605 * @stats: per-thread statistics
606 * (atomics, no lock needed)
607 * @tmp_ref: temporary reference to indicate thread is in use
608 * (atomic since @proc->inner_lock cannot
609 * always be acquired)
610 * @is_dead: thread is dead and awaiting free
611 * when outstanding transactions are cleaned up
Todd Kjosb4827902017-05-25 15:52:17 -0700612 * (protected by @proc->inner_lock)
Martijn Coenen07a30fe2017-06-07 10:02:12 -0700613 * @task: struct task_struct for this thread
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700614 *
615 * Bookkeeping structure for binder threads.
616 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900617struct binder_thread {
618 struct binder_proc *proc;
619 struct rb_node rb_node;
Martijn Coenen22d64e4322017-06-02 11:15:44 -0700620 struct list_head waiting_thread_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900621 int pid;
Todd Kjos6798e6d2017-01-06 14:19:25 -0800622 int looper; /* only modified by this thread */
623 bool looper_need_return; /* can be written by other thread */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900624 struct binder_transaction *transaction_stack;
625 struct list_head todo;
Martijn Coenen1af61802017-10-19 15:04:46 +0200626 bool process_todo;
Todd Kjos858b8da2017-04-21 17:35:12 -0700627 struct binder_error return_error;
628 struct binder_error reply_error;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900629 wait_queue_head_t wait;
630 struct binder_stats stats;
Todd Kjos2f993e22017-05-12 14:42:55 -0700631 atomic_t tmp_ref;
632 bool is_dead;
Martijn Coenen07a30fe2017-06-07 10:02:12 -0700633 struct task_struct *task;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900634};
635
636struct binder_transaction {
637 int debug_id;
638 struct binder_work work;
639 struct binder_thread *from;
640 struct binder_transaction *from_parent;
641 struct binder_proc *to_proc;
642 struct binder_thread *to_thread;
643 struct binder_transaction *to_parent;
644 unsigned need_reply:1;
645 /* unsigned is_dead:1; */ /* not used at the moment */
646
647 struct binder_buffer *buffer;
648 unsigned int code;
649 unsigned int flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -0700650 struct binder_priority priority;
651 struct binder_priority saved_priority;
Martijn Coenen07a30fe2017-06-07 10:02:12 -0700652 bool set_priority_called;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -0600653 kuid_t sender_euid;
Todd Kjos2f993e22017-05-12 14:42:55 -0700654 /**
655 * @lock: protects @from, @to_proc, and @to_thread
656 *
657 * @from, @to_proc, and @to_thread can be set to NULL
658 * during thread teardown
659 */
660 spinlock_t lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900661};
662
Todd Kjosfc7a7e22017-05-29 16:44:24 -0700663/**
664 * binder_proc_lock() - Acquire outer lock for given binder_proc
665 * @proc: struct binder_proc to acquire
666 *
667 * Acquires proc->outer_lock. Used to protect binder_ref
668 * structures associated with the given proc.
669 */
670#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
671static void
672_binder_proc_lock(struct binder_proc *proc, int line)
673{
674 binder_debug(BINDER_DEBUG_SPINLOCKS,
675 "%s: line=%d\n", __func__, line);
676 spin_lock(&proc->outer_lock);
677}
678
679/**
680 * binder_proc_unlock() - Release spinlock for given binder_proc
681 * @proc: struct binder_proc to acquire
682 *
683 * Release lock acquired via binder_proc_lock()
684 */
685#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
686static void
687_binder_proc_unlock(struct binder_proc *proc, int line)
688{
689 binder_debug(BINDER_DEBUG_SPINLOCKS,
690 "%s: line=%d\n", __func__, line);
691 spin_unlock(&proc->outer_lock);
692}
693
694/**
695 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
696 * @proc: struct binder_proc to acquire
697 *
698 * Acquires proc->inner_lock. Used to protect todo lists
699 */
700#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
701static void
702_binder_inner_proc_lock(struct binder_proc *proc, int line)
703{
704 binder_debug(BINDER_DEBUG_SPINLOCKS,
705 "%s: line=%d\n", __func__, line);
706 spin_lock(&proc->inner_lock);
707}
708
709/**
710 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
711 * @proc: struct binder_proc to acquire
712 *
713 * Release lock acquired via binder_inner_proc_lock()
714 */
715#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
716static void
717_binder_inner_proc_unlock(struct binder_proc *proc, int line)
718{
719 binder_debug(BINDER_DEBUG_SPINLOCKS,
720 "%s: line=%d\n", __func__, line);
721 spin_unlock(&proc->inner_lock);
722}
723
724/**
725 * binder_node_lock() - Acquire spinlock for given binder_node
726 * @node: struct binder_node to acquire
727 *
728 * Acquires node->lock. Used to protect binder_node fields
729 */
730#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
731static void
732_binder_node_lock(struct binder_node *node, int line)
733{
734 binder_debug(BINDER_DEBUG_SPINLOCKS,
735 "%s: line=%d\n", __func__, line);
736 spin_lock(&node->lock);
737}
738
739/**
740 * binder_node_unlock() - Release spinlock for given binder_node
741 * @node: struct binder_node to acquire
742 *
743 * Release lock acquired via binder_node_lock()
744 */
745#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
746static void
747_binder_node_unlock(struct binder_node *node, int line)
748{
749 binder_debug(BINDER_DEBUG_SPINLOCKS,
750 "%s: line=%d\n", __func__, line);
751 spin_unlock(&node->lock);
752}
753
Todd Kjoscbcbbd62017-06-08 13:45:59 -0700754/**
755 * binder_node_inner_lock() - Acquire node and inner locks
756 * @node: struct binder_node to acquire
757 *
758 * Acquires node->lock. If node->proc is non-NULL, also acquires
759 * proc->inner_lock. Used to protect binder_node fields
760 */
761#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
762static void
763_binder_node_inner_lock(struct binder_node *node, int line)
764{
765 binder_debug(BINDER_DEBUG_SPINLOCKS,
766 "%s: line=%d\n", __func__, line);
767 spin_lock(&node->lock);
768 if (node->proc)
769 binder_inner_proc_lock(node->proc);
770}
771
772/**
773 * binder_node_inner_unlock() - Release node and inner locks
774 * @node: struct binder_node to acquire
775 *
776 * Release locks acquired via binder_node_inner_lock()
777 */
778#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
779static void
780_binder_node_inner_unlock(struct binder_node *node, int line)
781{
782 struct binder_proc *proc = node->proc;
783
784 binder_debug(BINDER_DEBUG_SPINLOCKS,
785 "%s: line=%d\n", __func__, line);
786 if (proc)
787 binder_inner_proc_unlock(proc);
788 spin_unlock(&node->lock);
789}
790
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700791static bool binder_worklist_empty_ilocked(struct list_head *list)
792{
793 return list_empty(list);
794}
795
796/**
797 * binder_worklist_empty() - Check if no items on the work list
798 * @proc: binder_proc associated with list
799 * @list: list to check
800 *
801 * Return: true if there are no items on list, else false
802 */
803static bool binder_worklist_empty(struct binder_proc *proc,
804 struct list_head *list)
805{
806 bool ret;
807
808 binder_inner_proc_lock(proc);
809 ret = binder_worklist_empty_ilocked(list);
810 binder_inner_proc_unlock(proc);
811 return ret;
812}
813
Martijn Coenen1af61802017-10-19 15:04:46 +0200814/**
815 * binder_enqueue_work_ilocked() - Add an item to the work list
816 * @work: struct binder_work to add to list
817 * @target_list: list to add work to
818 *
819 * Adds the work to the specified list. Asserts that work
820 * is not already on a list.
821 *
822 * Requires the proc->inner_lock to be held.
823 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700824static void
825binder_enqueue_work_ilocked(struct binder_work *work,
826 struct list_head *target_list)
827{
828 BUG_ON(target_list == NULL);
829 BUG_ON(work->entry.next && !list_empty(&work->entry));
830 list_add_tail(&work->entry, target_list);
831}
832
833/**
Martijn Coenendac2e9c2017-11-13 09:55:21 +0100834 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
Martijn Coenen1af61802017-10-19 15:04:46 +0200835 * @thread: thread to queue work to
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700836 * @work: struct binder_work to add to list
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700837 *
Martijn Coenen1af61802017-10-19 15:04:46 +0200838 * Adds the work to the todo list of the thread. Doesn't set the process_todo
839 * flag, which means that (if it wasn't already set) the thread will go to
840 * sleep without handling this work when it calls read.
841 *
842 * Requires the proc->inner_lock to be held.
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700843 */
844static void
Martijn Coenendac2e9c2017-11-13 09:55:21 +0100845binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
846 struct binder_work *work)
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700847{
Martijn Coenen1af61802017-10-19 15:04:46 +0200848 binder_enqueue_work_ilocked(work, &thread->todo);
849}
850
851/**
852 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
853 * @thread: thread to queue work to
854 * @work: struct binder_work to add to list
855 *
856 * Adds the work to the todo list of the thread, and enables processing
857 * of the todo queue.
858 *
859 * Requires the proc->inner_lock to be held.
860 */
861static void
862binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
863 struct binder_work *work)
864{
865 binder_enqueue_work_ilocked(work, &thread->todo);
866 thread->process_todo = true;
867}
868
869/**
870 * binder_enqueue_thread_work() - Add an item to the thread work list
871 * @thread: thread to queue work to
872 * @work: struct binder_work to add to list
873 *
874 * Adds the work to the todo list of the thread, and enables processing
875 * of the todo queue.
876 */
877static void
878binder_enqueue_thread_work(struct binder_thread *thread,
879 struct binder_work *work)
880{
881 binder_inner_proc_lock(thread->proc);
882 binder_enqueue_thread_work_ilocked(thread, work);
883 binder_inner_proc_unlock(thread->proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -0700884}
885
886static void
887binder_dequeue_work_ilocked(struct binder_work *work)
888{
889 list_del_init(&work->entry);
890}
891
892/**
893 * binder_dequeue_work() - Removes an item from the work list
894 * @proc: binder_proc associated with list
895 * @work: struct binder_work to remove from list
896 *
897 * Removes the specified work item from whatever list it is on.
898 * Can safely be called if work is not on any list.
899 */
900static void
901binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
902{
903 binder_inner_proc_lock(proc);
904 binder_dequeue_work_ilocked(work);
905 binder_inner_proc_unlock(proc);
906}
907
908static struct binder_work *binder_dequeue_work_head_ilocked(
909 struct list_head *list)
910{
911 struct binder_work *w;
912
913 w = list_first_entry_or_null(list, struct binder_work, entry);
914 if (w)
915 list_del_init(&w->entry);
916 return w;
917}
918
919/**
920 * binder_dequeue_work_head() - Dequeues the item at head of list
921 * @proc: binder_proc associated with list
922 * @list: list to dequeue head
923 *
924 * Removes the head of the list if there are items on the list
925 *
926 * Return: pointer to dequeued binder_work, NULL if list was empty
927 */
928static struct binder_work *binder_dequeue_work_head(
929 struct binder_proc *proc,
930 struct list_head *list)
931{
932 struct binder_work *w;
933
934 binder_inner_proc_lock(proc);
935 w = binder_dequeue_work_head_ilocked(list);
936 binder_inner_proc_unlock(proc);
937 return w;
938}
939
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900940static void
941binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
Todd Kjos2f993e22017-05-12 14:42:55 -0700942static void binder_free_thread(struct binder_thread *thread);
943static void binder_free_proc(struct binder_proc *proc);
Todd Kjos425d23f2017-06-12 12:07:26 -0700944static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900945
Sachin Kamatefde99c2012-08-17 16:39:36 +0530946static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900947{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900948 unsigned long rlim_cur;
949 unsigned long irqs;
Todd Kjosfbb43392017-11-27 09:32:33 -0800950 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900951
Todd Kjosfbb43392017-11-27 09:32:33 -0800952 mutex_lock(&proc->files_lock);
953 if (proc->files == NULL) {
954 ret = -ESRCH;
955 goto err;
956 }
957 if (!lock_task_sighand(proc->tsk, &irqs)) {
958 ret = -EMFILE;
959 goto err;
960 }
Al Virodcfadfa2012-08-12 17:27:30 -0400961 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
962 unlock_task_sighand(proc->tsk, &irqs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900963
Todd Kjosfbb43392017-11-27 09:32:33 -0800964 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
965err:
966 mutex_unlock(&proc->files_lock);
967 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900968}
969
970/*
971 * copied from fd_install
972 */
973static void task_fd_install(
974 struct binder_proc *proc, unsigned int fd, struct file *file)
975{
Todd Kjosfbb43392017-11-27 09:32:33 -0800976 mutex_lock(&proc->files_lock);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200977 if (proc->files)
978 __fd_install(proc->files, fd, file);
Todd Kjosfbb43392017-11-27 09:32:33 -0800979 mutex_unlock(&proc->files_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900980}
981
982/*
983 * copied from sys_close
984 */
985static long task_close_fd(struct binder_proc *proc, unsigned int fd)
986{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900987 int retval;
988
Todd Kjosfbb43392017-11-27 09:32:33 -0800989 mutex_lock(&proc->files_lock);
990 if (proc->files == NULL) {
991 retval = -ESRCH;
992 goto err;
993 }
Martijn Coenen6f7e5f92018-06-15 11:53:36 +0200994 retval = __close_fd(proc->files, fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900995 /* can't restart close syscall because file table entry was cleared */
996 if (unlikely(retval == -ERESTARTSYS ||
997 retval == -ERESTARTNOINTR ||
998 retval == -ERESTARTNOHAND ||
999 retval == -ERESTART_RESTARTBLOCK))
1000 retval = -EINTR;
Todd Kjosfbb43392017-11-27 09:32:33 -08001001err:
1002 mutex_unlock(&proc->files_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001003 return retval;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001004}
1005
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001006static bool binder_has_work_ilocked(struct binder_thread *thread,
1007 bool do_proc_work)
1008{
Martijn Coenen1af61802017-10-19 15:04:46 +02001009 return thread->process_todo ||
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001010 thread->looper_need_return ||
1011 (do_proc_work &&
1012 !binder_worklist_empty_ilocked(&thread->proc->todo));
1013}
1014
1015static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
1016{
1017 bool has_work;
1018
1019 binder_inner_proc_lock(thread->proc);
1020 has_work = binder_has_work_ilocked(thread, do_proc_work);
1021 binder_inner_proc_unlock(thread->proc);
1022
1023 return has_work;
1024}
1025
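/*
 * A thread may pick up proc-wide work only if it has no transaction in
 * flight, nothing pending on its own todo list, and has registered with
 * or entered the looper.
 */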
1026static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1027{
1028 return !thread->transaction_stack &&
1029 binder_worklist_empty_ilocked(&thread->todo) &&
1030 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1031 BINDER_LOOPER_STATE_REGISTERED));
1032}
1033
1034static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1035 bool sync)
1036{
1037 struct rb_node *n;
1038 struct binder_thread *thread;
1039
1040 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1041 thread = rb_entry(n, struct binder_thread, rb_node);
1042 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1043 binder_available_for_proc_work_ilocked(thread)) {
1044 if (sync)
1045 wake_up_interruptible_sync(&thread->wait);
1046 else
1047 wake_up_interruptible(&thread->wait);
1048 }
1049 }
1050}
1051
Martijn Coenen053be422017-06-06 15:17:46 -07001052/**
1053 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1054 * @proc: process to select a thread from
1055 *
1056 * Note that calling this function moves the thread off the waiting_threads
1057 * list, so it can only be woken up by the caller of this function, or a
1058 * signal. Therefore, callers *should* always wake up the thread this function
1059 * returns.
1060 *
1061 * Return: If there's a thread currently waiting for process work,
1062 * returns that thread. Otherwise returns NULL.
1063 */
1064static struct binder_thread *
1065binder_select_thread_ilocked(struct binder_proc *proc)
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001066{
1067 struct binder_thread *thread;
1068
Martijn Coenened323352017-07-27 23:52:24 +02001069 assert_spin_locked(&proc->inner_lock);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001070 thread = list_first_entry_or_null(&proc->waiting_threads,
1071 struct binder_thread,
1072 waiting_thread_node);
1073
Martijn Coenen053be422017-06-06 15:17:46 -07001074 if (thread)
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001075 list_del_init(&thread->waiting_thread_node);
Martijn Coenen053be422017-06-06 15:17:46 -07001076
1077 return thread;
1078}
1079
1080/**
1081 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1082 * @proc: process to wake up a thread in
1083 * @thread: specific thread to wake-up (may be NULL)
1084 * @sync: whether to do a synchronous wake-up
1085 *
1086 * This function wakes up a thread in the @proc process.
1087 * The caller may provide a specific thread to wake-up in
1088 * the @thread parameter. If @thread is NULL, this function
1089 * will wake up threads that have called poll().
1090 *
1091 * Note that for this function to work as expected, callers
1092 * should first call binder_select_thread() to find a thread
1093 * to handle the work (if they don't have a thread already),
1094 * and pass the result into the @thread parameter.
1095 */
1096static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1097 struct binder_thread *thread,
1098 bool sync)
1099{
Martijn Coenened323352017-07-27 23:52:24 +02001100 assert_spin_locked(&proc->inner_lock);
Martijn Coenen053be422017-06-06 15:17:46 -07001101
1102 if (thread) {
Martijn Coenen22d64e4322017-06-02 11:15:44 -07001103 if (sync)
1104 wake_up_interruptible_sync(&thread->wait);
1105 else
1106 wake_up_interruptible(&thread->wait);
1107 return;
1108 }
1109
1110 /* Didn't find a thread waiting for proc work; this can happen
1111 * in two scenarios:
1112 * 1. All threads are busy handling transactions
1113 * In that case, one of those threads should call back into
1114 * the kernel driver soon and pick up this work.
1115 * 2. Threads are using the (e)poll interface, in which case
1116 * they may be blocked on the waitqueue without having been
1117 * added to waiting_threads. For this case, we just iterate
1118 * over all threads not handling transaction work, and
1119 * wake them all up. We wake all because we don't know whether
1120 * a thread that called into (e)poll is handling non-binder
1121 * work currently.
1122 */
1123 binder_wakeup_poll_threads_ilocked(proc, sync);
1124}
1125
Martijn Coenen053be422017-06-06 15:17:46 -07001126static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1127{
1128 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1129
1130 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1131}
1132
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001133static bool is_rt_policy(int policy)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001134{
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001135 return policy == SCHED_FIFO || policy == SCHED_RR;
1136}
Seunghun Lee10f62862014-05-01 01:30:23 +09001137
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001138static bool is_fair_policy(int policy)
1139{
1140 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1141}
1142
1143static bool binder_supported_policy(int policy)
1144{
1145 return is_fair_policy(policy) || is_rt_policy(policy);
1146}
1147
1148static int to_userspace_prio(int policy, int kernel_priority)
1149{
1150 if (is_fair_policy(policy))
1151 return PRIO_TO_NICE(kernel_priority);
1152 else
1153 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1154}
1155
1156static int to_kernel_prio(int policy, int user_priority)
1157{
1158 if (is_fair_policy(policy))
1159 return NICE_TO_PRIO(user_priority);
1160 else
1161 return MAX_USER_RT_PRIO - 1 - user_priority;
1162}
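/*
 * Worked examples of the mappings above (using the scheduler's prio
 * macros): for SCHED_NORMAL/SCHED_BATCH, nice 0 corresponds to kernel
 * prio 120 and nice 19 to 139; for SCHED_FIFO/SCHED_RR, userspace
 * priority 99 maps to kernel prio 0 (highest) and priority 1 to 98.
 */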
1163
Martijn Coenenecd972d2017-05-26 10:48:56 -07001164static void binder_do_set_priority(struct task_struct *task,
1165 struct binder_priority desired,
1166 bool verify)
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001167{
1168 int priority; /* user-space prio value */
1169 bool has_cap_nice;
1170 unsigned int policy = desired.sched_policy;
1171
1172 if (task->policy == policy && task->normal_prio == desired.prio)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001173 return;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001174
1175 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1176
1177 priority = to_userspace_prio(policy, desired.prio);
1178
Martijn Coenenecd972d2017-05-26 10:48:56 -07001179 if (verify && is_rt_policy(policy) && !has_cap_nice) {
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001180 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1181
1182 if (max_rtprio == 0) {
1183 policy = SCHED_NORMAL;
1184 priority = MIN_NICE;
1185 } else if (priority > max_rtprio) {
1186 priority = max_rtprio;
1187 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001188 }
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001189
Martijn Coenenecd972d2017-05-26 10:48:56 -07001190 if (verify && is_fair_policy(policy) && !has_cap_nice) {
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001191 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
1192
1193 if (min_nice > MAX_NICE) {
1194 binder_user_error("%d RLIMIT_NICE not set\n",
1195 task->pid);
1196 return;
1197 } else if (priority < min_nice) {
1198 priority = min_nice;
1199 }
1200 }
1201
1202 if (policy != desired.sched_policy ||
1203 to_kernel_prio(policy, priority) != desired.prio)
1204 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1205 "%d: priority %d not allowed, using %d instead\n",
1206 task->pid, desired.prio,
1207 to_kernel_prio(policy, priority));
1208
Martijn Coenen81402ea2017-05-08 09:33:22 -07001209 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1210 to_kernel_prio(policy, priority),
1211 desired.prio);
1212
Martijn Coenen57b2ac62017-06-06 17:04:42 -07001213 /* Set the actual priority */
1214 if (task->policy != policy || is_rt_policy(policy)) {
1215 struct sched_param params;
1216
1217 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1218
1219 sched_setscheduler_nocheck(task,
1220 policy | SCHED_RESET_ON_FORK,
1221 &params);
1222 }
1223 if (is_fair_policy(policy))
1224 set_user_nice(task, priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001225}
1226
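/*
 * binder_set_priority() applies @desired subject to the task's
 * RLIMIT_RTPRIO/RLIMIT_NICE limits (verify = true), while
 * binder_restore_priority() below bypasses those checks so that a
 * previously saved priority can always be put back.
 */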
Martijn Coenenecd972d2017-05-26 10:48:56 -07001227static void binder_set_priority(struct task_struct *task,
1228 struct binder_priority desired)
1229{
1230 binder_do_set_priority(task, desired, /* verify = */ true);
1231}
1232
1233static void binder_restore_priority(struct task_struct *task,
1234 struct binder_priority desired)
1235{
1236 binder_do_set_priority(task, desired, /* verify = */ false);
1237}
1238
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001239static void binder_transaction_priority(struct task_struct *task,
1240 struct binder_transaction *t,
Martijn Coenenc46810c2017-06-23 10:13:43 -07001241 struct binder_priority node_prio,
1242 bool inherit_rt)
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001243{
Ganesh Mahendran9add7c42017-09-27 15:12:25 +08001244 struct binder_priority desired_prio = t->priority;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001245
1246 if (t->set_priority_called)
1247 return;
1248
1249 t->set_priority_called = true;
1250 t->saved_priority.sched_policy = task->policy;
1251 t->saved_priority.prio = task->normal_prio;
1252
Martijn Coenenc46810c2017-06-23 10:13:43 -07001253 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1254 desired_prio.prio = NICE_TO_PRIO(0);
1255 desired_prio.sched_policy = SCHED_NORMAL;
Martijn Coenenc46810c2017-06-23 10:13:43 -07001256 }
Martijn Coenen07a30fe2017-06-07 10:02:12 -07001257
1258 if (node_prio.prio < t->priority.prio ||
1259 (node_prio.prio == t->priority.prio &&
1260 node_prio.sched_policy == SCHED_FIFO)) {
1261 /*
1262 * In case the minimum priority on the node is
1263 * higher (lower value), use that priority. If
1264 * the priority is the same, but the node uses
1265 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1266 * run unbounded, unlike SCHED_RR.
1267 */
1268 desired_prio = node_prio;
1269 }
1270
1271 binder_set_priority(task, desired_prio);
1272}
1273
Todd Kjos425d23f2017-06-12 12:07:26 -07001274static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1275 binder_uintptr_t ptr)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001276{
1277 struct rb_node *n = proc->nodes.rb_node;
1278 struct binder_node *node;
1279
Martijn Coenened323352017-07-27 23:52:24 +02001280 assert_spin_locked(&proc->inner_lock);
Todd Kjos425d23f2017-06-12 12:07:26 -07001281
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001282 while (n) {
1283 node = rb_entry(n, struct binder_node, rb_node);
1284
1285 if (ptr < node->ptr)
1286 n = n->rb_left;
1287 else if (ptr > node->ptr)
1288 n = n->rb_right;
Todd Kjosf22abc72017-05-09 11:08:05 -07001289 else {
1290 /*
1291 * take an implicit weak reference
1292 * to ensure node stays alive until
1293 * call to binder_put_node()
1294 */
Todd Kjos425d23f2017-06-12 12:07:26 -07001295 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001296 return node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001297 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001298 }
1299 return NULL;
1300}
1301
Todd Kjos425d23f2017-06-12 12:07:26 -07001302static struct binder_node *binder_get_node(struct binder_proc *proc,
1303 binder_uintptr_t ptr)
1304{
1305 struct binder_node *node;
1306
1307 binder_inner_proc_lock(proc);
1308 node = binder_get_node_ilocked(proc, ptr);
1309 binder_inner_proc_unlock(proc);
1310 return node;
1311}
1312
1313static struct binder_node *binder_init_node_ilocked(
1314 struct binder_proc *proc,
1315 struct binder_node *new_node,
1316 struct flat_binder_object *fp)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001317{
1318 struct rb_node **p = &proc->nodes.rb_node;
1319 struct rb_node *parent = NULL;
1320 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001321 binder_uintptr_t ptr = fp ? fp->binder : 0;
1322 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1323 __u32 flags = fp ? fp->flags : 0;
Martijn Coenen6aac9792017-06-07 09:29:14 -07001324 s8 priority;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001325
Martijn Coenened323352017-07-27 23:52:24 +02001326 assert_spin_locked(&proc->inner_lock);
1327
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001328 while (*p) {
Todd Kjos425d23f2017-06-12 12:07:26 -07001329
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001330 parent = *p;
1331 node = rb_entry(parent, struct binder_node, rb_node);
1332
1333 if (ptr < node->ptr)
1334 p = &(*p)->rb_left;
1335 else if (ptr > node->ptr)
1336 p = &(*p)->rb_right;
Todd Kjos425d23f2017-06-12 12:07:26 -07001337 else {
1338 /*
1339 * A matching node is already in
1340 * the rb tree. Abandon the init
1341 * and return it.
1342 */
1343 binder_inc_node_tmpref_ilocked(node);
1344 return node;
1345 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001346 }
Todd Kjos425d23f2017-06-12 12:07:26 -07001347 node = new_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001348 binder_stats_created(BINDER_STAT_NODE);
Todd Kjosf22abc72017-05-09 11:08:05 -07001349 node->tmp_refs++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001350 rb_link_node(&node->rb_node, parent, p);
1351 rb_insert_color(&node->rb_node, &proc->nodes);
Todd Kjosc4bd08b2017-05-25 10:56:00 -07001352 node->debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001353 node->proc = proc;
1354 node->ptr = ptr;
1355 node->cookie = cookie;
1356 node->work.type = BINDER_WORK_NODE;
Martijn Coenen6aac9792017-06-07 09:29:14 -07001357 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
Ganesh Mahendran6cd26312017-09-26 17:56:25 +08001358 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
Martijn Coenen6aac9792017-06-07 09:29:14 -07001359 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1360 node->min_priority = to_kernel_prio(node->sched_policy, priority);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001361 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
Martijn Coenenc46810c2017-06-23 10:13:43 -07001362 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
Todd Kjosfc7a7e22017-05-29 16:44:24 -07001363 spin_lock_init(&node->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001364 INIT_LIST_HEAD(&node->work.entry);
1365 INIT_LIST_HEAD(&node->async_todo);
1366 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001367 "%d:%d node %d u%016llx c%016llx created\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001368 proc->pid, current->pid, node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001369 (u64)node->ptr, (u64)node->cookie);
Todd Kjos425d23f2017-06-12 12:07:26 -07001370
1371 return node;
1372}
1373
1374static struct binder_node *binder_new_node(struct binder_proc *proc,
1375 struct flat_binder_object *fp)
1376{
1377 struct binder_node *node;
1378 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1379
1380 if (!new_node)
1381 return NULL;
1382 binder_inner_proc_lock(proc);
1383 node = binder_init_node_ilocked(proc, new_node, fp);
1384 binder_inner_proc_unlock(proc);
1385 if (node != new_node)
1386 /*
1387 * The node was already added by another thread
1388 */
1389 kfree(new_node);
1390
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001391 return node;
1392}
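/*
 * Usage sketch (illustrative only, not a verbatim caller from this file):
 * the kzalloc(GFP_KERNEL) above may sleep, so it is done before the inner
 * spinlock is taken, and the losing allocation is freed if another thread
 * inserted a matching node first.  Either way the returned node carries a
 * tmp_ref, so callers follow the pattern used by binder_translate_binder()
 * later in this file:
 *
 *	node = binder_get_node(proc, fp->binder);
 *	if (!node)
 *		node = binder_new_node(proc, fp);
 *	if (!node)
 *		return -ENOMEM;
 *	...
 *	binder_put_node(node);
 */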
1393
Todd Kjose7f23ed2017-03-21 13:06:01 -07001394static void binder_free_node(struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001395{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001396 kfree(node);
1397 binder_stats_deleted(BINDER_STAT_NODE);
1398}
1399
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001400static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1401 int internal,
1402 struct list_head *target_list)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001403{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001404 struct binder_proc *proc = node->proc;
1405
Martijn Coenened323352017-07-27 23:52:24 +02001406 assert_spin_locked(&node->lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001407 if (proc)
Martijn Coenened323352017-07-27 23:52:24 +02001408 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001409 if (strong) {
1410 if (internal) {
1411 if (target_list == NULL &&
1412 node->internal_strong_refs == 0 &&
Martijn Coenen0b3311e2016-09-30 15:51:48 +02001413 !(node->proc &&
1414 node == node->proc->context->
1415 binder_context_mgr_node &&
1416 node->has_strong_ref)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301417 pr_err("invalid inc strong node for %d\n",
1418 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001419 return -EINVAL;
1420 }
1421 node->internal_strong_refs++;
1422 } else
1423 node->local_strong_refs++;
1424 if (!node->has_strong_ref && target_list) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001425 binder_dequeue_work_ilocked(&node->work);
Martijn Coenen1af61802017-10-19 15:04:46 +02001426 /*
1427 * Note: this function is the only place where we queue
1428 * directly to a thread->todo without using the
1429 * corresponding binder_enqueue_thread_work() helper
1430 * functions; in this case it's ok to not set the
1431 * process_todo flag, since we know this node work will
1432 * always be followed by other work that starts queue
1433 * processing: in case of synchronous transactions, a
1434 * BR_REPLY or BR_ERROR; in case of oneway
1435 * transactions, a BR_TRANSACTION_COMPLETE.
1436 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001437 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001438 }
1439 } else {
1440 if (!internal)
1441 node->local_weak_refs++;
1442 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1443 if (target_list == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301444 pr_err("invalid inc weak node for %d\n",
1445 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001446 return -EINVAL;
1447 }
Martijn Coenen1af61802017-10-19 15:04:46 +02001448 /*
1449 * See comment above
1450 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001451 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001452 }
1453 }
1454 return 0;
1455}
1456
Todd Kjose7f23ed2017-03-21 13:06:01 -07001457static int binder_inc_node(struct binder_node *node, int strong, int internal,
1458 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001459{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001460 int ret;
1461
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001462 binder_node_inner_lock(node);
1463 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1464 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001465
1466 return ret;
1467}
1468
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001469static bool binder_dec_node_nilocked(struct binder_node *node,
1470 int strong, int internal)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001471{
1472 struct binder_proc *proc = node->proc;
1473
Martijn Coenened323352017-07-27 23:52:24 +02001474 assert_spin_locked(&node->lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001475 if (proc)
Martijn Coenened323352017-07-27 23:52:24 +02001476 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001477 if (strong) {
1478 if (internal)
1479 node->internal_strong_refs--;
1480 else
1481 node->local_strong_refs--;
1482 if (node->local_strong_refs || node->internal_strong_refs)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001483 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001484 } else {
1485 if (!internal)
1486 node->local_weak_refs--;
Todd Kjosf22abc72017-05-09 11:08:05 -07001487 if (node->local_weak_refs || node->tmp_refs ||
1488 !hlist_empty(&node->refs))
Todd Kjose7f23ed2017-03-21 13:06:01 -07001489 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001490 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001491
1492 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001493 if (list_empty(&node->work.entry)) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001494 binder_enqueue_work_ilocked(&node->work, &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07001495 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001496 }
1497 } else {
1498 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
Todd Kjosf22abc72017-05-09 11:08:05 -07001499 !node->local_weak_refs && !node->tmp_refs) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07001500 if (proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001501 binder_dequeue_work_ilocked(&node->work);
1502 rb_erase(&node->rb_node, &proc->nodes);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001503 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301504 "refless node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001505 node->debug_id);
1506 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001507 BUG_ON(!list_empty(&node->work.entry));
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001508 spin_lock(&binder_dead_nodes_lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001509 /*
1510 * tmp_refs could have changed so
1511 * check it again
1512 */
1513 if (node->tmp_refs) {
1514 spin_unlock(&binder_dead_nodes_lock);
1515 return false;
1516 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001517 hlist_del(&node->dead_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001518 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001519 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301520 "dead node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001521 node->debug_id);
1522 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001523 return true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001524 }
1525 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001526 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001527}
1528
Todd Kjose7f23ed2017-03-21 13:06:01 -07001529static void binder_dec_node(struct binder_node *node, int strong, int internal)
1530{
1531 bool free_node;
1532
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001533 binder_node_inner_lock(node);
1534 free_node = binder_dec_node_nilocked(node, strong, internal);
1535 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001536 if (free_node)
1537 binder_free_node(node);
1538}
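/*
 * Note on the split above: binder_dec_node_nilocked() only reports whether
 * the node became unreferenced; the kfree() in binder_free_node() happens
 * after binder_node_inner_unlock(), since the node cannot be freed while
 * its own embedded lock is still held.
 */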
1539
1540static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
Todd Kjosf22abc72017-05-09 11:08:05 -07001541{
1542 /*
1543 * No call to binder_inc_node() is needed since we
1544 * don't need to inform userspace of any changes to
1545 * tmp_refs
1546 */
1547 node->tmp_refs++;
1548}
1549
1550/**
Todd Kjose7f23ed2017-03-21 13:06:01 -07001551 * binder_inc_node_tmpref() - take a temporary reference on node
1552 * @node: node to reference
1553 *
1554 * Take reference on node to prevent the node from being freed
1555 * while referenced only by a local variable. The inner lock is
1556 * needed to serialize with the node work on the queue (which
1557 * isn't needed after the node is dead). If the node is dead
1558 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1559 * node->tmp_refs against dead-node-only cases where the node
1560 * lock cannot be acquired (e.g. traversing the dead node list to
1561 * print nodes)
1562 */
1563static void binder_inc_node_tmpref(struct binder_node *node)
1564{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001565 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001566 if (node->proc)
1567 binder_inner_proc_lock(node->proc);
1568 else
1569 spin_lock(&binder_dead_nodes_lock);
1570 binder_inc_node_tmpref_ilocked(node);
1571 if (node->proc)
1572 binder_inner_proc_unlock(node->proc);
1573 else
1574 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001575 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001576}
1577
1578/**
Todd Kjosf22abc72017-05-09 11:08:05 -07001579 * binder_dec_node_tmpref() - remove a temporary reference on node
1580 * @node: node to reference
1581 *
1582 * Release temporary reference on node taken via binder_inc_node_tmpref()
1583 */
1584static void binder_dec_node_tmpref(struct binder_node *node)
1585{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001586 bool free_node;
1587
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001588 binder_node_inner_lock(node);
1589 if (!node->proc)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001590 spin_lock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001591 node->tmp_refs--;
1592 BUG_ON(node->tmp_refs < 0);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001593 if (!node->proc)
1594 spin_unlock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001595 /*
1596 * Call binder_dec_node() to check if all refcounts are 0
1597 * and cleanup is needed. Calling with strong=0 and internal=1
1598 * causes no actual reference to be released in binder_dec_node().
1599 * If that changes, a change is needed here too.
1600 */
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001601 free_node = binder_dec_node_nilocked(node, 0, 1);
1602 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001603 if (free_node)
1604 binder_free_node(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07001605}
1606
1607static void binder_put_node(struct binder_node *node)
1608{
1609 binder_dec_node_tmpref(node);
1610}
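/*
 * Temporary-reference sketch (illustrative; mirrors binder_get_node_from_ref()
 * and the translate helpers later in this file):
 *
 *	node = binder_get_node(proc, ptr);	(takes an implicit tmp_ref)
 *	if (node) {
 *		... operate on node ...
 *		binder_put_node(node);		(drops the tmp_ref)
 *	}
 *
 * The tmp_ref only pins the struct binder_node in memory; no node work is
 * queued and userspace is never informed of it.
 */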
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001611
Todd Kjos5346bf32016-10-20 16:43:34 -07001612static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1613 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001614{
1615 struct rb_node *n = proc->refs_by_desc.rb_node;
1616 struct binder_ref *ref;
1617
1618 while (n) {
1619 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1620
Todd Kjosb0117bb2017-05-08 09:16:27 -07001621 if (desc < ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001622 n = n->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001623 } else if (desc > ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001624 n = n->rb_right;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001625 } else if (need_strong_ref && !ref->data.strong) {
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001626 binder_user_error("tried to use weak ref as strong ref\n");
1627 return NULL;
1628 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001629 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001630 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001631 }
1632 return NULL;
1633}
1634
Todd Kjosb0117bb2017-05-08 09:16:27 -07001635/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001636 * binder_get_ref_for_node_olocked() - get the ref associated with given node
Todd Kjosb0117bb2017-05-08 09:16:27 -07001637 * @proc: binder_proc that owns the ref
1638 * @node: binder_node of target
1639 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1640 *
1641 * Look up the ref for the given node and return it if it exists
1642 *
1643 * If it doesn't exist and the caller provides a newly allocated
1644 * ref, initialize the fields of the newly allocated ref and insert
1645 * into the given proc rb_trees and node refs list.
1646 *
1647 * Return: the ref for node. It is possible that another thread
1648 * allocated/initialized the ref first in which case the
1649 * returned ref would be different than the passed-in
1650 * new_ref. new_ref must be kfree'd by the caller in
1651 * this case.
1652 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001653static struct binder_ref *binder_get_ref_for_node_olocked(
1654 struct binder_proc *proc,
1655 struct binder_node *node,
1656 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001657{
Todd Kjosb0117bb2017-05-08 09:16:27 -07001658 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001659 struct rb_node **p = &proc->refs_by_node.rb_node;
1660 struct rb_node *parent = NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001661 struct binder_ref *ref;
1662 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001663
1664 while (*p) {
1665 parent = *p;
1666 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1667
1668 if (node < ref->node)
1669 p = &(*p)->rb_left;
1670 else if (node > ref->node)
1671 p = &(*p)->rb_right;
1672 else
1673 return ref;
1674 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001675 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001676 return NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001677
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001678 binder_stats_created(BINDER_STAT_REF);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001679 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001680 new_ref->proc = proc;
1681 new_ref->node = node;
1682 rb_link_node(&new_ref->rb_node_node, parent, p);
1683 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1684
Todd Kjosb0117bb2017-05-08 09:16:27 -07001685 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001686 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1687 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001688 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001689 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001690 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001691 }
1692
1693 p = &proc->refs_by_desc.rb_node;
1694 while (*p) {
1695 parent = *p;
1696 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1697
Todd Kjosb0117bb2017-05-08 09:16:27 -07001698 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001699 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001700 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001701 p = &(*p)->rb_right;
1702 else
1703 BUG();
1704 }
1705 rb_link_node(&new_ref->rb_node_desc, parent, p);
1706 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001707
1708 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001709 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001710
Todd Kjos4cbe5752017-05-01 17:21:51 -07001711 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1712 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001713 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001714 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001715 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001716 return new_ref;
1717}
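/*
 * Allocation sketch (illustrative; binder_inc_ref_for_node() below is the
 * in-file user of this pattern): kzalloc(GFP_KERNEL) may sleep, so the ref
 * is allocated with the proc lock dropped and the lookup is retried:
 *
 *	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 *	if (!ref) {
 *		binder_proc_unlock(proc);
 *		new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
 *		binder_proc_lock(proc);
 *		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 *	}
 *	...
 *	if (new_ref && ref != new_ref)
 *		kfree(new_ref);		(another thread created the ref first)
 */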
1718
Todd Kjos5346bf32016-10-20 16:43:34 -07001719static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001720{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001721 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001722
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001723 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301724 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001725 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301726 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001727
1728 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1729 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001730
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001731 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001732 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001733 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001734
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001735 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001736 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1737 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001738 /*
1739 * Clear ref->node unless we want the caller to free the node
1740 */
1741 if (!delete_node) {
1742 /*
1743 * The caller uses ref->node to determine
1744 * whether the node needs to be freed. Clear
1745 * it since the node is still alive.
1746 */
1747 ref->node = NULL;
1748 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001749
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001750 if (ref->death) {
1751 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301752 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001753 ref->proc->pid, ref->data.debug_id,
1754 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001755 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001756 binder_stats_deleted(BINDER_STAT_DEATH);
1757 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001758 binder_stats_deleted(BINDER_STAT_REF);
1759}
1760
Todd Kjosb0117bb2017-05-08 09:16:27 -07001761/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001762 * binder_inc_ref_olocked() - increment the ref for given handle
Todd Kjosb0117bb2017-05-08 09:16:27 -07001763 * @ref: ref to be incremented
1764 * @strong: if true, strong increment, else weak
1765 * @target_list: list to queue node work on
1766 *
Todd Kjos5346bf32016-10-20 16:43:34 -07001767 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjosb0117bb2017-05-08 09:16:27 -07001768 *
1769 * Return: 0, if successful, else errno
1770 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001771static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1772 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001773{
1774 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001775
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001776 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001777 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001778 ret = binder_inc_node(ref->node, 1, 1, target_list);
1779 if (ret)
1780 return ret;
1781 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001782 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001783 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001784 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001785 ret = binder_inc_node(ref->node, 0, 1, target_list);
1786 if (ret)
1787 return ret;
1788 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001789 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001790 }
1791 return 0;
1792}
1793
Todd Kjosb0117bb2017-05-08 09:16:27 -07001794/**
1795 * binder_dec_ref_olocked() - dec the ref for given handle
1796 * @ref: ref to be decremented
1797 * @strong: if true, strong decrement, else weak
1798 *
1799 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1800 *
Todd Kjosb0117bb2017-05-08 09:16:27 -07001801 * Return: true if ref is cleaned up and ready to be freed
1802 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001803static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001804{
1805 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001806 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301807 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001808 ref->proc->pid, ref->data.debug_id,
1809 ref->data.desc, ref->data.strong,
1810 ref->data.weak);
1811 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001812 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001813 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001814 if (ref->data.strong == 0)
1815 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001816 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001817 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301818 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001819 ref->proc->pid, ref->data.debug_id,
1820 ref->data.desc, ref->data.strong,
1821 ref->data.weak);
1822 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001823 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001824 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001825 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001826 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001827 binder_cleanup_ref_olocked(ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001828 return true;
1829 }
1830 return false;
1831}
1832
1833/**
1834 * binder_get_node_from_ref() - get the node from the given proc/desc
1835 * @proc: proc containing the ref
1836 * @desc: the handle associated with the ref
1837 * @need_strong_ref: if true, only return node if ref is strong
1838 * @rdata: the id/refcount data for the ref
1839 *
1840 * Given a proc and ref handle, return the associated binder_node
1841 *
1842 * Return: a binder_node or NULL if not found or not strong when strong required
1843 */
1844static struct binder_node *binder_get_node_from_ref(
1845 struct binder_proc *proc,
1846 u32 desc, bool need_strong_ref,
1847 struct binder_ref_data *rdata)
1848{
1849 struct binder_node *node;
1850 struct binder_ref *ref;
1851
Todd Kjos5346bf32016-10-20 16:43:34 -07001852 binder_proc_lock(proc);
1853 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001854 if (!ref)
1855 goto err_no_ref;
1856 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001857 /*
1858 * Take an implicit reference on the node to ensure
1859 * it stays alive until the call to binder_put_node()
1860 */
1861 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001862 if (rdata)
1863 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001864 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001865
1866 return node;
1867
1868err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001869 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001870 return NULL;
1871}
1872
1873/**
1874 * binder_free_ref() - free the binder_ref
1875 * @ref: ref to free
1876 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001877 * Free the binder_ref. Free the binder_node indicated by ref->node
1878 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001879 */
1880static void binder_free_ref(struct binder_ref *ref)
1881{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001882 if (ref->node)
1883 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001884 kfree(ref->death);
1885 kfree(ref);
1886}
1887
1888/**
1889 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1890 * @proc: proc containing the ref
1891 * @desc: the handle associated with the ref
1892 * @increment: true=inc reference, false=dec reference
1893 * @strong: true=strong reference, false=weak reference
1894 * @rdata: the id/refcount data for the ref
1895 *
1896 * Given a proc and ref handle, increment or decrement the ref
1897 * according to "increment" arg.
1898 *
1899 * Return: 0 if successful, else errno
1900 */
1901static int binder_update_ref_for_handle(struct binder_proc *proc,
1902 uint32_t desc, bool increment, bool strong,
1903 struct binder_ref_data *rdata)
1904{
1905 int ret = 0;
1906 struct binder_ref *ref;
1907 bool delete_ref = false;
1908
Todd Kjos5346bf32016-10-20 16:43:34 -07001909 binder_proc_lock(proc);
1910 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001911 if (!ref) {
1912 ret = -EINVAL;
1913 goto err_no_ref;
1914 }
1915 if (increment)
Todd Kjos5346bf32016-10-20 16:43:34 -07001916 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001917 else
Todd Kjos5346bf32016-10-20 16:43:34 -07001918 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001919
1920 if (rdata)
1921 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001922 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001923
1924 if (delete_ref)
1925 binder_free_ref(ref);
1926 return ret;
1927
1928err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001929 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001930 return ret;
1931}
1932
1933/**
1934 * binder_dec_ref_for_handle() - dec the ref for given handle
1935 * @proc: proc containing the ref
1936 * @desc: the handle associated with the ref
1937 * @strong: true=strong reference, false=weak reference
1938 * @rdata: the id/refcount data for the ref
1939 *
1940 * Just calls binder_update_ref_for_handle() to decrement the ref.
1941 *
1942 * Return: 0 if successful, else errno
1943 */
1944static int binder_dec_ref_for_handle(struct binder_proc *proc,
1945 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1946{
1947 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1948}
1949
1950
1951/**
1952 * binder_inc_ref_for_node() - increment the ref for given proc/node
1953 * @proc: proc containing the ref
1954 * @node: target node
1955 * @strong: true=strong reference, false=weak reference
1956 * @target_list: worklist to use if node is incremented
1957 * @rdata: the id/refcount data for the ref
1958 *
1959 * Given a proc and node, increment the ref. Create the ref if it
1960 * doesn't already exist
1961 *
1962 * Return: 0 if successful, else errno
1963 */
1964static int binder_inc_ref_for_node(struct binder_proc *proc,
1965 struct binder_node *node,
1966 bool strong,
1967 struct list_head *target_list,
1968 struct binder_ref_data *rdata)
1969{
1970 struct binder_ref *ref;
1971 struct binder_ref *new_ref = NULL;
1972 int ret = 0;
1973
Todd Kjos5346bf32016-10-20 16:43:34 -07001974 binder_proc_lock(proc);
1975 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001976 if (!ref) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001977 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001978 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1979 if (!new_ref)
1980 return -ENOMEM;
Todd Kjos5346bf32016-10-20 16:43:34 -07001981 binder_proc_lock(proc);
1982 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001983 }
Todd Kjos5346bf32016-10-20 16:43:34 -07001984 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001985 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001986 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001987 if (new_ref && ref != new_ref)
1988 /*
1989 * Another thread created the ref first so
1990 * free the one we allocated
1991 */
1992 kfree(new_ref);
1993 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001994}
1995
Martijn Coenen995a36e2017-06-02 13:36:52 -07001996static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1997 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001998{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001999 BUG_ON(!target_thread);
Martijn Coenened323352017-07-27 23:52:24 +02002000 assert_spin_locked(&target_thread->proc->inner_lock);
Todd Kjos21ef40a2017-03-30 18:02:13 -07002001 BUG_ON(target_thread->transaction_stack != t);
2002 BUG_ON(target_thread->transaction_stack->from != target_thread);
2003 target_thread->transaction_stack =
2004 target_thread->transaction_stack->from_parent;
2005 t->from = NULL;
2006}
2007
Todd Kjos2f993e22017-05-12 14:42:55 -07002008/**
2009 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2010 * @thread: thread to decrement
2011 *
2012 * A thread needs to be kept alive while being used to create or
2013 * handle a transaction. binder_get_txn_from() is used to safely
2014 * extract t->from from a binder_transaction and keep the thread
2015 * indicated by t->from from being freed. When done with that
2016 * binder_thread, this function is called to decrement the
2017 * tmp_ref and free if appropriate (thread has been released
2018 * and no transaction being processed by the driver)
2019 */
2020static void binder_thread_dec_tmpref(struct binder_thread *thread)
2021{
2022 /*
2023 * tmp_ref is atomic so it can be adjusted without the inner lock; the
2024 * lock below makes the final is_dead/zero check and the free atomic.
Todd Kjos2f993e22017-05-12 14:42:55 -07002025 */
Todd Kjosb4827902017-05-25 15:52:17 -07002026 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002027 atomic_dec(&thread->tmp_ref);
2028 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07002029 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002030 binder_free_thread(thread);
2031 return;
2032 }
Todd Kjosb4827902017-05-25 15:52:17 -07002033 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002034}
2035
2036/**
2037 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2038 * @proc: proc to decrement
2039 *
2040 * A binder_proc needs to be kept alive while being used to create or
2041 * handle a transaction. proc->tmp_ref is incremented when
2042 * creating a new transaction or the binder_proc is currently in-use
2043 * by threads that are being released. When done with the binder_proc,
2044 * this function is called to decrement the counter and free the
2045 * proc if appropriate (proc has been released, all threads have
2046 * been released and not currently in-use to process a transaction).
2047 */
2048static void binder_proc_dec_tmpref(struct binder_proc *proc)
2049{
Todd Kjosb4827902017-05-25 15:52:17 -07002050 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002051 proc->tmp_ref--;
2052 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2053 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07002054 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002055 binder_free_proc(proc);
2056 return;
2057 }
Todd Kjosb4827902017-05-25 15:52:17 -07002058 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002059}
2060
2061/**
2062 * binder_get_txn_from() - safely extract the "from" thread in transaction
2063 * @t: binder transaction for t->from
2064 *
2065 * Atomically return the "from" thread and increment the tmp_ref
2066 * count for the thread to ensure it stays alive until
2067 * binder_thread_dec_tmpref() is called.
2068 *
2069 * Return: the value of t->from
2070 */
2071static struct binder_thread *binder_get_txn_from(
2072 struct binder_transaction *t)
2073{
2074 struct binder_thread *from;
2075
2076 spin_lock(&t->lock);
2077 from = t->from;
2078 if (from)
2079 atomic_inc(&from->tmp_ref);
2080 spin_unlock(&t->lock);
2081 return from;
2082}
2083
Martijn Coenen995a36e2017-06-02 13:36:52 -07002084/**
2085 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2086 * @t: binder transaction for t->from
2087 *
2088 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2089 * to guarantee that the thread cannot be released while operating on it.
2090 * The caller must call binder_inner_proc_unlock() to release the inner lock
2091 * as well as call binder_thread_dec_tmpref() to release the reference.
2092 *
2093 * Return: the value of t->from
2094 */
2095static struct binder_thread *binder_get_txn_from_and_acq_inner(
2096 struct binder_transaction *t)
2097{
2098 struct binder_thread *from;
2099
2100 from = binder_get_txn_from(t);
2101 if (!from)
2102 return NULL;
2103 binder_inner_proc_lock(from->proc);
2104 if (t->from) {
2105 BUG_ON(from != t->from);
2106 return from;
2107 }
2108 binder_inner_proc_unlock(from->proc);
2109 binder_thread_dec_tmpref(from);
2110 return NULL;
2111}
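/*
 * Caller sketch (illustrative; binder_send_failed_reply() below follows this
 * shape): the returned thread is pinned by a tmp_ref and from->proc's inner
 * lock is still held, so both must be released by the caller:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... deliver work under the inner lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */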
2112
Todd Kjos21ef40a2017-03-30 18:02:13 -07002113static void binder_free_transaction(struct binder_transaction *t)
2114{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002115 if (t->buffer)
2116 t->buffer->transaction = NULL;
2117 kfree(t);
2118 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2119}
2120
2121static void binder_send_failed_reply(struct binder_transaction *t,
2122 uint32_t error_code)
2123{
2124 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002125 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09002126
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002127 BUG_ON(t->flags & TF_ONE_WAY);
2128 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002129 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002130 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002131 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2132 "send failed reply for transaction %d to %d:%d\n",
2133 t->debug_id,
2134 target_thread->proc->pid,
2135 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002136
Martijn Coenen995a36e2017-06-02 13:36:52 -07002137 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07002138 if (target_thread->reply_error.cmd == BR_OK) {
2139 target_thread->reply_error.cmd = error_code;
Martijn Coenen1af61802017-10-19 15:04:46 +02002140 binder_enqueue_thread_work_ilocked(
2141 target_thread,
2142 &target_thread->reply_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002143 wake_up_interruptible(&target_thread->wait);
2144 } else {
Todd Kjosd3a2afb2018-02-07 12:38:47 -08002145 /*
2146 * Cannot get here for normal operation, but
2147 * we can if multiple synchronous transactions
2148 * are sent without blocking for responses.
2149 * Just ignore the 2nd error in this case.
2150 */
2151 pr_warn("Unexpected reply error: %u\n",
2152 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002153 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002154 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002155 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07002156 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002157 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002158 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002159 next = t->from_parent;
2160
2161 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2162 "send failed reply for transaction %d, target dead\n",
2163 t->debug_id);
2164
Todd Kjos21ef40a2017-03-30 18:02:13 -07002165 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002166 if (next == NULL) {
2167 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2168 "reply failed, no target thread at root\n");
2169 return;
2170 }
2171 t = next;
2172 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2173 "reply failed, no target thread -- retry %d\n",
2174 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002175 }
2176}
2177
Martijn Coenen00c80372016-07-13 12:06:49 +02002178/**
Martijn Coenen3217ccc2017-08-24 15:23:36 +02002179 * binder_cleanup_transaction() - cleans up undelivered transaction
2180 * @t: transaction that needs to be cleaned up
2181 * @reason: reason the transaction wasn't delivered
2182 * @error_code: error to return to caller (if synchronous call)
2183 */
2184static void binder_cleanup_transaction(struct binder_transaction *t,
2185 const char *reason,
2186 uint32_t error_code)
2187{
2188 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2189 binder_send_failed_reply(t, error_code);
2190 } else {
2191 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2192 "undelivered transaction %d, %s\n",
2193 t->debug_id, reason);
2194 binder_free_transaction(t);
2195 }
2196}
2197
2198/**
Martijn Coenen00c80372016-07-13 12:06:49 +02002199 * binder_validate_object() - checks for a valid metadata object in a buffer.
2200 * @buffer: binder_buffer that we're parsing.
2201 * @offset: offset in the buffer at which to validate an object.
2202 *
2203 * Return: If there's a valid metadata object at @offset in @buffer, the
2204 * size of that object. Otherwise, it returns zero.
2205 */
2206static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2207{
2208 /* Check if we can read a header first */
2209 struct binder_object_header *hdr;
2210 size_t object_size = 0;
2211
Dan Carpentera1996892018-03-29 12:14:40 +03002212 if (buffer->data_size < sizeof(*hdr) ||
2213 offset > buffer->data_size - sizeof(*hdr) ||
Martijn Coenen00c80372016-07-13 12:06:49 +02002214 !IS_ALIGNED(offset, sizeof(u32)))
2215 return 0;
2216
2217 /* Ok, now see if we can read a complete object. */
2218 hdr = (struct binder_object_header *)(buffer->data + offset);
2219 switch (hdr->type) {
2220 case BINDER_TYPE_BINDER:
2221 case BINDER_TYPE_WEAK_BINDER:
2222 case BINDER_TYPE_HANDLE:
2223 case BINDER_TYPE_WEAK_HANDLE:
2224 object_size = sizeof(struct flat_binder_object);
2225 break;
2226 case BINDER_TYPE_FD:
2227 object_size = sizeof(struct binder_fd_object);
2228 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002229 case BINDER_TYPE_PTR:
2230 object_size = sizeof(struct binder_buffer_object);
2231 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002232 case BINDER_TYPE_FDA:
2233 object_size = sizeof(struct binder_fd_array_object);
2234 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02002235 default:
2236 return 0;
2237 }
2238 if (offset <= buffer->data_size - object_size &&
2239 buffer->data_size >= object_size)
2240 return object_size;
2241 else
2242 return 0;
2243}
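/*
 * Buffer layout assumed by the validation helpers (descriptive note; the
 * offsets are located exactly as computed in
 * binder_transaction_buffer_release() below): the raw data area comes first,
 * then, padded to sizeof(void *) alignment, an array of binder_size_t
 * offsets, each pointing at a binder_object_header inside the data area:
 *
 *	|<---- data_size ---->|<pad>|<----- offsets_size ----->|
 *	[ objects and payload ]     [ off[0] off[1] ... off[n] ]
 */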
2244
Martijn Coenen5a6da532016-09-30 14:10:07 +02002245/**
2246 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2247 * @b: binder_buffer containing the object
2248 * @index: index in offset array at which the binder_buffer_object is
2249 * located
2250 * @start: points to the start of the offset array
2251 * @num_valid: the number of valid offsets in the offset array
2252 *
2253 * Return: If @index is within the valid range of the offset array
2254 * described by @start and @num_valid, and if there's a valid
2255 * binder_buffer_object at the offset found in index @index
2256 * of the offset array, that object is returned. Otherwise,
2257 * %NULL is returned.
2258 * Note that the offset found in index @index itself is not
2259 * verified; this function assumes that @num_valid elements
2260 * from @start were previously verified to have valid offsets.
2261 */
2262static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2263 binder_size_t index,
2264 binder_size_t *start,
2265 binder_size_t num_valid)
2266{
2267 struct binder_buffer_object *buffer_obj;
2268 binder_size_t *offp;
2269
2270 if (index >= num_valid)
2271 return NULL;
2272
2273 offp = start + index;
2274 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2275 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2276 return NULL;
2277
2278 return buffer_obj;
2279}
2280
2281/**
2282 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2283 * @b: transaction buffer
2284 * @objects_start: start of objects buffer
2285 * @buffer: binder_buffer_object in which to fix up
2286 * @fixup_offset: start offset in @buffer to fix up
2287 * @last_obj: last binder_buffer_object that we fixed up in
2288 * @last_min_offset: minimum fixup offset in @last_obj
2289 *
2290 * Return: %true if a fixup in buffer @buffer at offset @offset is
2291 * allowed.
2292 *
2293 * For safety reasons, we only allow fixups inside a buffer to happen
2294 * at increasing offsets; additionally, we only allow fixup on the last
2295 * buffer object that was verified, or one of its parents.
2296 *
2297 * Example of what is allowed:
2298 *
2299 * A
2300 * B (parent = A, offset = 0)
2301 * C (parent = A, offset = 16)
2302 * D (parent = C, offset = 0)
2303 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2304 *
2305 * Examples of what is not allowed:
2306 *
2307 * Decreasing offsets within the same parent:
2308 * A
2309 * C (parent = A, offset = 16)
2310 * B (parent = A, offset = 0) // decreasing offset within A
2311 *
2312 * Referring to a parent that wasn't the last object or any of its parents:
2313 * A
2314 * B (parent = A, offset = 0)
2315 * C (parent = A, offset = 0)
2316 * C (parent = A, offset = 16)
2317 * D (parent = B, offset = 0) // B is not A or any of A's parents
2318 */
2319static bool binder_validate_fixup(struct binder_buffer *b,
2320 binder_size_t *objects_start,
2321 struct binder_buffer_object *buffer,
2322 binder_size_t fixup_offset,
2323 struct binder_buffer_object *last_obj,
2324 binder_size_t last_min_offset)
2325{
2326 if (!last_obj) {
2327 /* No previously verified object to fix up in */
2328 return false;
2329 }
2330
2331 while (last_obj != buffer) {
2332 /*
2333 * Safe to retrieve the parent of last_obj, since it
2334 * was already previously verified by the driver.
2335 */
2336 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2337 return false;
2338 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2339 last_obj = (struct binder_buffer_object *)
2340 (b->data + *(objects_start + last_obj->parent));
2341 }
2342 return (fixup_offset >= last_min_offset);
2343}
2344
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002345static void binder_transaction_buffer_release(struct binder_proc *proc,
2346 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002347 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002348{
Martijn Coenen5a6da532016-09-30 14:10:07 +02002349 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002350 int debug_id = buffer->debug_id;
2351
2352 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosf540ce02018-02-07 13:57:37 -08002353 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002354 proc->pid, buffer->debug_id,
2355 buffer->data_size, buffer->offsets_size, failed_at);
2356
2357 if (buffer->target_node)
2358 binder_dec_node(buffer->target_node, 1, 0);
2359
Martijn Coenen5a6da532016-09-30 14:10:07 +02002360 off_start = (binder_size_t *)(buffer->data +
2361 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002362 if (failed_at)
2363 off_end = failed_at;
2364 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02002365 off_end = (void *)off_start + buffer->offsets_size;
2366 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002367 struct binder_object_header *hdr;
2368 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002369
Martijn Coenen00c80372016-07-13 12:06:49 +02002370 if (object_size == 0) {
2371 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002372 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002373 continue;
2374 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002375 hdr = (struct binder_object_header *)(buffer->data + *offp);
2376 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002377 case BINDER_TYPE_BINDER:
2378 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002379 struct flat_binder_object *fp;
2380 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002381
Martijn Coenen00c80372016-07-13 12:06:49 +02002382 fp = to_flat_binder_object(hdr);
2383 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002384 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002385 pr_err("transaction release %d bad node %016llx\n",
2386 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002387 break;
2388 }
2389 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002390 " node %d u%016llx\n",
2391 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002392 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2393 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002394 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002395 } break;
2396 case BINDER_TYPE_HANDLE:
2397 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002398 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002399 struct binder_ref_data rdata;
2400 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002401
Martijn Coenen00c80372016-07-13 12:06:49 +02002402 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002403 ret = binder_dec_ref_for_handle(proc, fp->handle,
2404 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2405
2406 if (ret) {
2407 pr_err("transaction release %d bad handle %d, ret = %d\n",
2408 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002409 break;
2410 }
2411 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002412 " ref %d desc %d\n",
2413 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002414 } break;
2415
Martijn Coenen00c80372016-07-13 12:06:49 +02002416 case BINDER_TYPE_FD: {
2417 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2418
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002419 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002420 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002421 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002422 task_close_fd(proc, fp->fd);
2423 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002424 case BINDER_TYPE_PTR:
2425 /*
2426 * Nothing to do here, this will get cleaned up when the
2427 * transaction buffer gets freed
2428 */
2429 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002430 case BINDER_TYPE_FDA: {
2431 struct binder_fd_array_object *fda;
2432 struct binder_buffer_object *parent;
2433 uintptr_t parent_buffer;
2434 u32 *fd_array;
2435 size_t fd_index;
2436 binder_size_t fd_buf_size;
2437
2438 fda = to_binder_fd_array_object(hdr);
2439 parent = binder_validate_ptr(buffer, fda->parent,
2440 off_start,
2441 offp - off_start);
2442 if (!parent) {
2443 pr_err("transaction release %d bad parent offset",
2444 debug_id);
2445 continue;
2446 }
2447 /*
2448 * Since the parent was already fixed up, convert it
2449 * back to kernel address space to access it
2450 */
2451 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002452 binder_alloc_get_user_buffer_offset(
2453 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002454
2455 fd_buf_size = sizeof(u32) * fda->num_fds;
2456 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2457 pr_err("transaction release %d invalid number of fds (%lld)\n",
2458 debug_id, (u64)fda->num_fds);
2459 continue;
2460 }
2461 if (fd_buf_size > parent->length ||
2462 fda->parent_offset > parent->length - fd_buf_size) {
2463 /* No space for all file descriptors here. */
2464 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2465 debug_id, (u64)fda->num_fds);
2466 continue;
2467 }
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002468 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002469 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2470 task_close_fd(proc, fd_array[fd_index]);
2471 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002472 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002473 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002474 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002475 break;
2476 }
2477 }
2478}
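/*
 * Note on failed_at above: a non-NULL failed_at means the transaction is
 * being unwound after a partial failure, so the walk stops at the failure
 * point rather than at the end of the offsets array, and that is also the
 * only case in which plain BINDER_TYPE_FD objects have their fds closed
 * here (see the "if (failed_at)" guard in the BINDER_TYPE_FD branch).
 */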
2479
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002480static int binder_translate_binder(struct flat_binder_object *fp,
2481 struct binder_transaction *t,
2482 struct binder_thread *thread)
2483{
2484 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002485 struct binder_proc *proc = thread->proc;
2486 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002487 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002488 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002489
2490 node = binder_get_node(proc, fp->binder);
2491 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002492 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002493 if (!node)
2494 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002495 }
2496 if (fp->cookie != node->cookie) {
2497 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2498 proc->pid, thread->pid, (u64)fp->binder,
2499 node->debug_id, (u64)fp->cookie,
2500 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002501 ret = -EINVAL;
2502 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002503 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002504 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2505 ret = -EPERM;
2506 goto done;
2507 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002508
Todd Kjosb0117bb2017-05-08 09:16:27 -07002509 ret = binder_inc_ref_for_node(target_proc, node,
2510 fp->hdr.type == BINDER_TYPE_BINDER,
2511 &thread->todo, &rdata);
2512 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002513 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002514
2515 if (fp->hdr.type == BINDER_TYPE_BINDER)
2516 fp->hdr.type = BINDER_TYPE_HANDLE;
2517 else
2518 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2519 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002520 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002521 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002522
Todd Kjosb0117bb2017-05-08 09:16:27 -07002523 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002524 binder_debug(BINDER_DEBUG_TRANSACTION,
2525 " node %d u%016llx -> ref %d desc %d\n",
2526 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002527 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002528done:
2529 binder_put_node(node);
2530 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002531}
2532
2533static int binder_translate_handle(struct flat_binder_object *fp,
2534 struct binder_transaction *t,
2535 struct binder_thread *thread)
2536{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002537 struct binder_proc *proc = thread->proc;
2538 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002539 struct binder_node *node;
2540 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002541 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002542
Todd Kjosb0117bb2017-05-08 09:16:27 -07002543 node = binder_get_node_from_ref(proc, fp->handle,
2544 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2545 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002546 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2547 proc->pid, thread->pid, fp->handle);
2548 return -EINVAL;
2549 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002550 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2551 ret = -EPERM;
2552 goto done;
2553 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002554
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002555 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002556 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002557 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2558 fp->hdr.type = BINDER_TYPE_BINDER;
2559 else
2560 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002561 fp->binder = node->ptr;
2562 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002563 if (node->proc)
2564 binder_inner_proc_lock(node->proc);
2565 binder_inc_node_nilocked(node,
2566 fp->hdr.type == BINDER_TYPE_BINDER,
2567 0, NULL);
2568 if (node->proc)
2569 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002570 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002571 binder_debug(BINDER_DEBUG_TRANSACTION,
2572 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002573 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2574 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002575 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002576 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002577 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002578
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002579 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002580 ret = binder_inc_ref_for_node(target_proc, node,
2581 fp->hdr.type == BINDER_TYPE_HANDLE,
2582 NULL, &dest_rdata);
2583 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002584 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002585
2586 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002587 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002588 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002589 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2590 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002591 binder_debug(BINDER_DEBUG_TRANSACTION,
2592 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002593 src_rdata.debug_id, src_rdata.desc,
2594 dest_rdata.debug_id, dest_rdata.desc,
2595 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002596 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002597done:
2598 binder_put_node(node);
2599 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002600}
2601
2602static int binder_translate_fd(int fd,
2603 struct binder_transaction *t,
2604 struct binder_thread *thread,
2605 struct binder_transaction *in_reply_to)
2606{
2607 struct binder_proc *proc = thread->proc;
2608 struct binder_proc *target_proc = t->to_proc;
2609 int target_fd;
2610 struct file *file;
2611 int ret;
2612 bool target_allows_fd;
2613
2614 if (in_reply_to)
2615 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2616 else
2617 target_allows_fd = t->buffer->target_node->accept_fds;
2618 if (!target_allows_fd) {
2619 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2620 proc->pid, thread->pid,
2621 in_reply_to ? "reply" : "transaction",
2622 fd);
2623 ret = -EPERM;
2624 goto err_fd_not_accepted;
2625 }
2626
2627 file = fget(fd);
2628 if (!file) {
2629 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2630 proc->pid, thread->pid, fd);
2631 ret = -EBADF;
2632 goto err_fget;
2633 }
2634 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2635 if (ret < 0) {
2636 ret = -EPERM;
2637 goto err_security;
2638 }
2639
2640 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2641 if (target_fd < 0) {
2642 ret = -ENOMEM;
2643 goto err_get_unused_fd;
2644 }
2645 task_fd_install(target_proc, target_fd, file);
2646 trace_binder_transaction_fd(t, fd, target_fd);
2647 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2648 fd, target_fd);
2649
2650 return target_fd;
2651
2652err_get_unused_fd:
2653err_security:
2654 fput(file);
2655err_fget:
2656err_fd_not_accepted:
2657 return ret;
2658}
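/*
 * Illustrative sketch, not part of the driver: roughly what a sender is
 * assumed to embed in the transaction payload so that binder_translate_fd()
 * runs on it. 'dup_me' is a hypothetical descriptor valid in the sender;
 * the struct comes from uapi/linux/android/binder.h.
 *
 *	struct binder_fd_object obj = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = dup_me,
 *	};
 *	// the offset of 'obj' within the data buffer must also appear in
 *	// tr.data.ptr.offsets so the kernel knows to translate it
 *
 * On delivery the kernel has replaced obj.fd with a descriptor installed
 * in the receiver's file table (O_CLOEXEC), as done above.
 */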
2659
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002660static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2661 struct binder_buffer_object *parent,
2662 struct binder_transaction *t,
2663 struct binder_thread *thread,
2664 struct binder_transaction *in_reply_to)
2665{
2666 binder_size_t fdi, fd_buf_size, num_installed_fds;
2667 int target_fd;
2668 uintptr_t parent_buffer;
2669 u32 *fd_array;
2670 struct binder_proc *proc = thread->proc;
2671 struct binder_proc *target_proc = t->to_proc;
2672
2673 fd_buf_size = sizeof(u32) * fda->num_fds;
2674 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2675 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2676 proc->pid, thread->pid, (u64)fda->num_fds);
2677 return -EINVAL;
2678 }
2679 if (fd_buf_size > parent->length ||
2680 fda->parent_offset > parent->length - fd_buf_size) {
2681 /* No space for all file descriptors here. */
2682 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2683 proc->pid, thread->pid, (u64)fda->num_fds);
2684 return -EINVAL;
2685 }
2686 /*
2687 * Since the parent was already fixed up, convert it
2688 * back to the kernel address space to access it
2689 */
Todd Kjosd325d372016-10-10 10:40:53 -07002690 parent_buffer = parent->buffer -
2691 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002692 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002693 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2694 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2695 proc->pid, thread->pid);
2696 return -EINVAL;
2697 }
2698 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2699 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2700 in_reply_to);
2701 if (target_fd < 0)
2702 goto err_translate_fd_failed;
2703 fd_array[fdi] = target_fd;
2704 }
2705 return 0;
2706
2707err_translate_fd_failed:
2708 /*
2709 * Failed to allocate fd or security error, free fds
2710 * installed so far.
2711 */
2712 num_installed_fds = fdi;
2713 for (fdi = 0; fdi < num_installed_fds; fdi++)
2714 task_close_fd(target_proc, fd_array[fdi]);
2715 return target_fd;
2716}
2717
Martijn Coenen5a6da532016-09-30 14:10:07 +02002718static int binder_fixup_parent(struct binder_transaction *t,
2719 struct binder_thread *thread,
2720 struct binder_buffer_object *bp,
2721 binder_size_t *off_start,
2722 binder_size_t num_valid,
2723 struct binder_buffer_object *last_fixup_obj,
2724 binder_size_t last_fixup_min_off)
2725{
2726 struct binder_buffer_object *parent;
2727 u8 *parent_buffer;
2728 struct binder_buffer *b = t->buffer;
2729 struct binder_proc *proc = thread->proc;
2730 struct binder_proc *target_proc = t->to_proc;
2731
2732 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2733 return 0;
2734
2735 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2736 if (!parent) {
2737 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2738 proc->pid, thread->pid);
2739 return -EINVAL;
2740 }
2741
2742 if (!binder_validate_fixup(b, off_start,
2743 parent, bp->parent_offset,
2744 last_fixup_obj,
2745 last_fixup_min_off)) {
2746 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2747 proc->pid, thread->pid);
2748 return -EINVAL;
2749 }
2750
2751 if (parent->length < sizeof(binder_uintptr_t) ||
2752 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2753 /* No space for a pointer here! */
2754 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2755 proc->pid, thread->pid);
2756 return -EINVAL;
2757 }
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002758 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002759 binder_alloc_get_user_buffer_offset(
2760 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002761 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2762
2763 return 0;
2764}
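/*
 * Illustrative sketch, not part of the driver: roughly how a sender is
 * assumed to describe a scatter-gather payload that the two helpers above
 * operate on. 'parcel_data', 'blob' and the chosen offsets are
 * hypothetical; the struct fields are from uapi/linux/android/binder.h.
 *
 *	struct binder_buffer_object parent = {
 *		.hdr.type = BINDER_TYPE_PTR,
 *		.buffer = (binder_uintptr_t)parcel_data,
 *		.length = parcel_len,
 *	};
 *	struct binder_buffer_object child = {
 *		.hdr.type = BINDER_TYPE_PTR,
 *		.flags = BINDER_BUFFER_FLAG_HAS_PARENT,
 *		.buffer = (binder_uintptr_t)blob,
 *		.length = blob_len,
 *		.parent = 0,		// index of 'parent' in the offsets array
 *		.parent_offset = 8,	// byte offset in parcel_data holding the pointer
 *	};
 *
 * binder_translate_fd_array() patches fd values inside such an
 * already-fixed-up parent buffer, and binder_fixup_parent() rewrites the
 * pointer at parent_offset so it refers to the child's copy in the
 * target's address space.
 */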
2765
Martijn Coenen053be422017-06-06 15:17:46 -07002766/**
2767 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2768 * @t: transaction to send
2769 * @proc: process to send the transaction to
2770 * @thread: thread in @proc to send the transaction to (may be NULL)
2771 *
2772 * This function queues a transaction to the specified process. It will try
2773 * to find a thread in the target process to handle the transaction and
2774 * wake it up. If no thread is found, the work is queued to the proc
2775 * waitqueue.
2776 *
2777 * If the @thread parameter is not NULL, the transaction is always queued
2778 * to the waitlist of that specific thread.
2779 *
2780 * Return: true if the transaction was successfully queued
2781 * false if the target process or thread is dead
2782 */
2783static bool binder_proc_transaction(struct binder_transaction *t,
2784 struct binder_proc *proc,
2785 struct binder_thread *thread)
2786{
Martijn Coenen053be422017-06-06 15:17:46 -07002787 struct binder_node *node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002788 struct binder_priority node_prio;
Martijn Coenen053be422017-06-06 15:17:46 -07002789 bool oneway = !!(t->flags & TF_ONE_WAY);
Martijn Coenen1af61802017-10-19 15:04:46 +02002790 bool pending_async = false;
Martijn Coenen053be422017-06-06 15:17:46 -07002791
2792 BUG_ON(!node);
2793 binder_node_lock(node);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002794 node_prio.prio = node->min_priority;
2795 node_prio.sched_policy = node->sched_policy;
2796
Martijn Coenen053be422017-06-06 15:17:46 -07002797 if (oneway) {
2798 BUG_ON(thread);
2799 if (node->has_async_transaction) {
Martijn Coenen1af61802017-10-19 15:04:46 +02002800 pending_async = true;
Martijn Coenen053be422017-06-06 15:17:46 -07002801 } else {
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -06002802 node->has_async_transaction = true;
Martijn Coenen053be422017-06-06 15:17:46 -07002803 }
2804 }
2805
2806 binder_inner_proc_lock(proc);
2807
2808 if (proc->is_dead || (thread && thread->is_dead)) {
2809 binder_inner_proc_unlock(proc);
2810 binder_node_unlock(node);
2811 return false;
2812 }
2813
Martijn Coenen1af61802017-10-19 15:04:46 +02002814 if (!thread && !pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002815 thread = binder_select_thread_ilocked(proc);
2816
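	/*
	 * Three possible destinations from here: a specific thread's todo
	 * list (replies, or sync work when a waiting thread was found), the
	 * proc-wide todo list, or the node's async_todo when an earlier
	 * oneway transaction on this node has not been consumed yet.
	 */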
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002817 if (thread) {
Martijn Coenenc46810c2017-06-23 10:13:43 -07002818 binder_transaction_priority(thread->task, t, node_prio,
2819 node->inherit_rt);
Martijn Coenen1af61802017-10-19 15:04:46 +02002820 binder_enqueue_thread_work_ilocked(thread, &t->work);
2821 } else if (!pending_async) {
2822 binder_enqueue_work_ilocked(&t->work, &proc->todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002823 } else {
Martijn Coenen1af61802017-10-19 15:04:46 +02002824 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002825 }
Martijn Coenen053be422017-06-06 15:17:46 -07002826
Martijn Coenen1af61802017-10-19 15:04:46 +02002827 if (!pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002828 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2829
2830 binder_inner_proc_unlock(proc);
2831 binder_node_unlock(node);
2832
2833 return true;
2834}
2835
Todd Kjos291d9682017-09-25 08:55:09 -07002836/**
2837 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2838 * @node: struct binder_node for which to get refs
2839 * @proc: returns @node->proc if valid
2840 * @error: if no @proc then returns BR_DEAD_REPLY
2841 *
2842 * User-space normally keeps the node alive when creating a transaction
2843 * since it has a reference to the target. The local strong ref keeps it
2844 * alive if the sending process dies before the target process processes
2845 * the transaction. If the source process is malicious or has a reference
2846 * counting bug, relying on the local strong ref can fail.
2847 *
2848 * Since user-space can cause the local strong ref to go away, we also take
2849 * a tmpref on the node to ensure it survives while we are constructing
2850 * the transaction. We also need a tmpref on the proc while we are
2851 * constructing the transaction, so we take that here as well.
2852 *
2853 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2854 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2855 * target proc has died, @error is set to BR_DEAD_REPLY
2856 */
2857static struct binder_node *binder_get_node_refs_for_txn(
2858 struct binder_node *node,
2859 struct binder_proc **procp,
2860 uint32_t *error)
2861{
2862 struct binder_node *target_node = NULL;
2863
2864 binder_node_inner_lock(node);
2865 if (node->proc) {
2866 target_node = node;
2867 binder_inc_node_nilocked(node, 1, 0, NULL);
2868 binder_inc_node_tmpref_ilocked(node);
2869 node->proc->tmp_ref++;
2870 *procp = node->proc;
2871 } else
2872 *error = BR_DEAD_REPLY;
2873 binder_node_inner_unlock(node);
2874
2875 return target_node;
2876}
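/*
 * A note on the caller's side of the contract above: binder_transaction()
 * below balances these references with binder_dec_node_tmpref() and
 * binder_proc_dec_tmpref() once the transaction has been queued (or has
 * failed).
 */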
2877
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002878static void binder_transaction(struct binder_proc *proc,
2879 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002880 struct binder_transaction_data *tr, int reply,
2881 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002882{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002883 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002884 struct binder_transaction *t;
2885 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002886 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002887 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002888 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002889 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002890 struct binder_thread *target_thread = NULL;
2891 struct binder_node *target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002892 struct binder_transaction *in_reply_to = NULL;
2893 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002894 uint32_t return_error = 0;
2895 uint32_t return_error_param = 0;
2896 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002897 struct binder_buffer_object *last_fixup_obj = NULL;
2898 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002899 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002900 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002901
2902 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002903 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002904 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2905 e->from_proc = proc->pid;
2906 e->from_thread = thread->pid;
2907 e->target_handle = tr->target.handle;
2908 e->data_size = tr->data_size;
2909 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002910 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002911
2912 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002913 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002914 in_reply_to = thread->transaction_stack;
2915 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002916 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302917 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002918 proc->pid, thread->pid);
2919 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002920 return_error_param = -EPROTO;
2921 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002922 goto err_empty_call_stack;
2923 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002924 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002925 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302926 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002927 proc->pid, thread->pid, in_reply_to->debug_id,
2928 in_reply_to->to_proc ?
2929 in_reply_to->to_proc->pid : 0,
2930 in_reply_to->to_thread ?
2931 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002932 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002933 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002934 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002935 return_error_param = -EPROTO;
2936 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002937 in_reply_to = NULL;
2938 goto err_bad_call_stack;
2939 }
2940 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002941 binder_inner_proc_unlock(proc);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002942 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002943 if (target_thread == NULL) {
2944 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002945 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002946 goto err_dead_binder;
2947 }
2948 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302949 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002950 proc->pid, thread->pid,
2951 target_thread->transaction_stack ?
2952 target_thread->transaction_stack->debug_id : 0,
2953 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002954 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002955 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002956 return_error_param = -EPROTO;
2957 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002958 in_reply_to = NULL;
2959 target_thread = NULL;
2960 goto err_dead_binder;
2961 }
2962 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002963 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002964 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002965 } else {
2966 if (tr->target.handle) {
2967 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002968
Todd Kjosc37162d2017-05-26 11:56:29 -07002969 /*
2970 * There must already be a strong ref
2971 * on this node. If so, do a strong
2972 * increment on the node to ensure it
2973 * stays alive until the transaction is
2974 * done.
2975 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002976 binder_proc_lock(proc);
2977 ref = binder_get_ref_olocked(proc, tr->target.handle,
2978 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002979 if (ref) {
Todd Kjos291d9682017-09-25 08:55:09 -07002980 target_node = binder_get_node_refs_for_txn(
2981 ref->node, &target_proc,
2982 &return_error);
2983 } else {
2984 binder_user_error("%d:%d got transaction to invalid handle\n",
2985 proc->pid, thread->pid);
2986 return_error = BR_FAILED_REPLY;
Todd Kjosc37162d2017-05-26 11:56:29 -07002987 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002988 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002989 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002990 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002991 target_node = context->binder_context_mgr_node;
Todd Kjos291d9682017-09-25 08:55:09 -07002992 if (target_node)
2993 target_node = binder_get_node_refs_for_txn(
2994 target_node, &target_proc,
2995 &return_error);
2996 else
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002997 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002998 mutex_unlock(&context->context_mgr_node_lock);
Martijn Coenenc4048b22018-03-28 11:14:50 +02002999 if (target_node && target_proc == proc) {
3000 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3001 proc->pid, thread->pid);
3002 return_error = BR_FAILED_REPLY;
3003 return_error_param = -EINVAL;
3004 return_error_line = __LINE__;
3005 goto err_invalid_target_handle;
3006 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003007 }
Todd Kjos291d9682017-09-25 08:55:09 -07003008 if (!target_node) {
3009 /*
3010 * return_error is set above
3011 */
3012 return_error_param = -EINVAL;
Todd Kjose598d172017-03-22 17:19:52 -07003013 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003014 goto err_dead_binder;
3015 }
Todd Kjos291d9682017-09-25 08:55:09 -07003016 e->to_node = target_node->debug_id;
Stephen Smalley79af7302015-01-21 10:54:10 -05003017 if (security_binder_transaction(proc->tsk,
3018 target_proc->tsk) < 0) {
3019 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003020 return_error_param = -EPERM;
3021 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05003022 goto err_invalid_target_handle;
3023 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003024 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003025 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3026 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003027
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003028 tmp = thread->transaction_stack;
3029 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003030 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05303031 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003032 proc->pid, thread->pid, tmp->debug_id,
3033 tmp->to_proc ? tmp->to_proc->pid : 0,
3034 tmp->to_thread ?
3035 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07003036 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003037 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003038 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003039 return_error_param = -EPROTO;
3040 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003041 goto err_bad_call_stack;
3042 }
3043 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003044 struct binder_thread *from;
3045
3046 spin_lock(&tmp->lock);
3047 from = tmp->from;
3048 if (from && from->proc == target_proc) {
3049 atomic_inc(&from->tmp_ref);
3050 target_thread = from;
3051 spin_unlock(&tmp->lock);
3052 break;
3053 }
3054 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003055 tmp = tmp->from_parent;
3056 }
3057 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003058 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003059 }
Martijn Coenen053be422017-06-06 15:17:46 -07003060 if (target_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003061 e->to_thread = target_thread->pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003062 e->to_proc = target_proc->pid;
3063
3064 /* TODO: reuse incoming transaction for reply */
3065 t = kzalloc(sizeof(*t), GFP_KERNEL);
3066 if (t == NULL) {
3067 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003068 return_error_param = -ENOMEM;
3069 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003070 goto err_alloc_t_failed;
3071 }
3072 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07003073 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003074
3075 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3076 if (tcomplete == NULL) {
3077 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003078 return_error_param = -ENOMEM;
3079 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003080 goto err_alloc_tcomplete_failed;
3081 }
3082 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3083
Todd Kjos1cfe6272017-05-24 13:33:28 -07003084 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003085
3086 if (reply)
3087 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003088 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003089 proc->pid, thread->pid, t->debug_id,
3090 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003091 (u64)tr->data.ptr.buffer,
3092 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003093 (u64)tr->data_size, (u64)tr->offsets_size,
3094 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003095 else
3096 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003097 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003098 proc->pid, thread->pid, t->debug_id,
3099 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003100 (u64)tr->data.ptr.buffer,
3101 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003102 (u64)tr->data_size, (u64)tr->offsets_size,
3103 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003104
3105 if (!reply && !(tr->flags & TF_ONE_WAY))
3106 t->from = thread;
3107 else
3108 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03003109 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003110 t->to_proc = target_proc;
3111 t->to_thread = target_thread;
3112 t->code = tr->code;
3113 t->flags = tr->flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07003114 if (!(t->flags & TF_ONE_WAY) &&
3115 binder_supported_policy(current->policy)) {
3116 /* Inherit supported policies for synchronous transactions */
3117 t->priority.sched_policy = current->policy;
3118 t->priority.prio = current->normal_prio;
3119 } else {
3120 /* Otherwise, fall back to the default priority */
3121 t->priority = target_proc->default_priority;
3122 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003123
3124 trace_binder_transaction(reply, t, target_node);
3125
Todd Kjosd325d372016-10-10 10:40:53 -07003126 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02003127 tr->offsets_size, extra_buffers_size,
3128 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07003129 if (IS_ERR(t->buffer)) {
3130 /*
3131 * -ESRCH indicates VMA cleared. The target is dying.
3132 */
3133 return_error_param = PTR_ERR(t->buffer);
3134 return_error = return_error_param == -ESRCH ?
3135 BR_DEAD_REPLY : BR_FAILED_REPLY;
3136 return_error_line = __LINE__;
3137 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003138 goto err_binder_alloc_buf_failed;
3139 }
3140 t->buffer->allow_user_free = 0;
3141 t->buffer->debug_id = t->debug_id;
3142 t->buffer->transaction = t;
3143 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003144 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003145 off_start = (binder_size_t *)(t->buffer->data +
3146 ALIGN(tr->data_size, sizeof(void *)));
3147 offp = off_start;
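	/*
	 * Layout of t->buffer->data: the raw payload (tr->data_size bytes),
	 * then the pointer-aligned offsets array (tr->offsets_size bytes),
	 * then the extra-buffers region used for BINDER_TYPE_PTR objects
	 * (extra_buffers_size bytes, consumed via sg_bufp below).
	 */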
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003148
Arve Hjønnevågda498892014-02-21 14:40:26 -08003149 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3150 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303151 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3152 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003153 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003154 return_error_param = -EFAULT;
3155 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003156 goto err_copy_data_failed;
3157 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003158 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3159 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303160 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3161 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003162 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003163 return_error_param = -EFAULT;
3164 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003165 goto err_copy_data_failed;
3166 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003167 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3168 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3169 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003170 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003171 return_error_param = -EINVAL;
3172 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003173 goto err_bad_offset;
3174 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02003175 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3176 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3177 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05303178 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003179 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003180 return_error_param = -EINVAL;
3181 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003182 goto err_bad_offset;
3183 }
3184 off_end = (void *)off_start + tr->offsets_size;
3185 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3186 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003187 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003188 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02003189 struct binder_object_header *hdr;
3190 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09003191
Martijn Coenen00c80372016-07-13 12:06:49 +02003192 if (object_size == 0 || *offp < off_min) {
3193 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003194 proc->pid, thread->pid, (u64)*offp,
3195 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02003196 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003197 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003198 return_error_param = -EINVAL;
3199 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003200 goto err_bad_offset;
3201 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003202
3203 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3204 off_min = *offp + object_size;
3205 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003206 case BINDER_TYPE_BINDER:
3207 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003208 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003209
Martijn Coenen00c80372016-07-13 12:06:49 +02003210 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003211 ret = binder_translate_binder(fp, t, thread);
3212 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02003213 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003214 return_error_param = ret;
3215 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003216 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003217 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003218 } break;
3219 case BINDER_TYPE_HANDLE:
3220 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003221 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003222
Martijn Coenen00c80372016-07-13 12:06:49 +02003223 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003224 ret = binder_translate_handle(fp, t, thread);
3225 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003226 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003227 return_error_param = ret;
3228 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003229 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003230 }
3231 } break;
3232
3233 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003234 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003235 int target_fd = binder_translate_fd(fp->fd, t, thread,
3236 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003237
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003238 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003239 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003240 return_error_param = target_fd;
3241 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003242 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003243 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003244 fp->pad_binder = 0;
3245 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003246 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003247 case BINDER_TYPE_FDA: {
3248 struct binder_fd_array_object *fda =
3249 to_binder_fd_array_object(hdr);
3250 struct binder_buffer_object *parent =
3251 binder_validate_ptr(t->buffer, fda->parent,
3252 off_start,
3253 offp - off_start);
3254 if (!parent) {
3255 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3256 proc->pid, thread->pid);
3257 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003258 return_error_param = -EINVAL;
3259 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003260 goto err_bad_parent;
3261 }
3262 if (!binder_validate_fixup(t->buffer, off_start,
3263 parent, fda->parent_offset,
3264 last_fixup_obj,
3265 last_fixup_min_off)) {
3266 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3267 proc->pid, thread->pid);
3268 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003269 return_error_param = -EINVAL;
3270 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003271 goto err_bad_parent;
3272 }
3273 ret = binder_translate_fd_array(fda, parent, t, thread,
3274 in_reply_to);
3275 if (ret < 0) {
3276 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003277 return_error_param = ret;
3278 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003279 goto err_translate_failed;
3280 }
3281 last_fixup_obj = parent;
3282 last_fixup_min_off =
3283 fda->parent_offset + sizeof(u32) * fda->num_fds;
3284 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003285 case BINDER_TYPE_PTR: {
3286 struct binder_buffer_object *bp =
3287 to_binder_buffer_object(hdr);
3288 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003289
Martijn Coenen5a6da532016-09-30 14:10:07 +02003290 if (bp->length > buf_left) {
3291 binder_user_error("%d:%d got transaction with too large buffer\n",
3292 proc->pid, thread->pid);
3293 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003294 return_error_param = -EINVAL;
3295 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003296 goto err_bad_offset;
3297 }
3298 if (copy_from_user(sg_bufp,
3299 (const void __user *)(uintptr_t)
3300 bp->buffer, bp->length)) {
3301 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3302 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07003303 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003304 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003305 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003306 goto err_copy_data_failed;
3307 }
3308 /* Fixup buffer pointer to target proc address space */
3309 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07003310 binder_alloc_get_user_buffer_offset(
3311 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003312 sg_bufp += ALIGN(bp->length, sizeof(u64));
3313
3314 ret = binder_fixup_parent(t, thread, bp, off_start,
3315 offp - off_start,
3316 last_fixup_obj,
3317 last_fixup_min_off);
3318 if (ret < 0) {
3319 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003320 return_error_param = ret;
3321 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003322 goto err_translate_failed;
3323 }
3324 last_fixup_obj = bp;
3325 last_fixup_min_off = 0;
3326 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003327 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01003328 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02003329 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003330 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003331 return_error_param = -EINVAL;
3332 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003333 goto err_bad_object_type;
3334 }
3335 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003336 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003337 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003338
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003339 if (reply) {
Martijn Coenen1af61802017-10-19 15:04:46 +02003340 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003341 binder_inner_proc_lock(target_proc);
3342 if (target_thread->is_dead) {
3343 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003344 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003345 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003346 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003347 binder_pop_transaction_ilocked(target_thread, in_reply_to);
Martijn Coenen1af61802017-10-19 15:04:46 +02003348 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003349 binder_inner_proc_unlock(target_proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003350 wake_up_interruptible_sync(&target_thread->wait);
Martijn Coenenecd972d2017-05-26 10:48:56 -07003351 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003352 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003353 } else if (!(t->flags & TF_ONE_WAY)) {
3354 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003355 binder_inner_proc_lock(proc);
Martijn Coenendac2e9c2017-11-13 09:55:21 +01003356 /*
3357 * Defer the TRANSACTION_COMPLETE, so we don't return to
3358 * userspace immediately; this allows the target process to
3359 * immediately start processing this transaction, reducing
3360 * latency. We will then return the TRANSACTION_COMPLETE when
3361 * the target replies (or there is an error).
3362 */
3363 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003364 t->need_reply = 1;
3365 t->from_parent = thread->transaction_stack;
3366 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003367 binder_inner_proc_unlock(proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003368 if (!binder_proc_transaction(t, target_proc, target_thread)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003369 binder_inner_proc_lock(proc);
3370 binder_pop_transaction_ilocked(thread, t);
3371 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003372 goto err_dead_proc_or_thread;
3373 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003374 } else {
3375 BUG_ON(target_node == NULL);
3376 BUG_ON(t->buffer->async_transaction != 1);
Martijn Coenen1af61802017-10-19 15:04:46 +02003377 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen053be422017-06-06 15:17:46 -07003378 if (!binder_proc_transaction(t, target_proc, NULL))
Todd Kjos2f993e22017-05-12 14:42:55 -07003379 goto err_dead_proc_or_thread;
Riley Andrewsb5968812015-09-01 12:42:07 -07003380 }
Todd Kjos2f993e22017-05-12 14:42:55 -07003381 if (target_thread)
3382 binder_thread_dec_tmpref(target_thread);
3383 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003384 if (target_node)
3385 binder_dec_node_tmpref(target_node);
Todd Kjos1cfe6272017-05-24 13:33:28 -07003386 /*
3387 * write barrier to synchronize with initialization
3388 * of log entry
3389 */
3390 smp_wmb();
3391 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003392 return;
3393
Todd Kjos2f993e22017-05-12 14:42:55 -07003394err_dead_proc_or_thread:
3395 return_error = BR_DEAD_REPLY;
3396 return_error_line = __LINE__;
Xu YiPing86578a02017-05-22 11:26:23 -07003397 binder_dequeue_work(proc, tcomplete);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003398err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003399err_bad_object_type:
3400err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003401err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003402err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003403 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003404 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjos291d9682017-09-25 08:55:09 -07003405 if (target_node)
3406 binder_dec_node_tmpref(target_node);
Todd Kjosc37162d2017-05-26 11:56:29 -07003407 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003408 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07003409 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003410err_binder_alloc_buf_failed:
3411 kfree(tcomplete);
3412 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3413err_alloc_tcomplete_failed:
3414 kfree(t);
3415 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3416err_alloc_t_failed:
3417err_bad_call_stack:
3418err_empty_call_stack:
3419err_dead_binder:
3420err_invalid_target_handle:
Todd Kjos2f993e22017-05-12 14:42:55 -07003421 if (target_thread)
3422 binder_thread_dec_tmpref(target_thread);
3423 if (target_proc)
3424 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003425 if (target_node) {
Todd Kjosc37162d2017-05-26 11:56:29 -07003426 binder_dec_node(target_node, 1, 0);
Todd Kjos291d9682017-09-25 08:55:09 -07003427 binder_dec_node_tmpref(target_node);
3428 }
Todd Kjosc37162d2017-05-26 11:56:29 -07003429
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003430 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07003431 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3432 proc->pid, thread->pid, return_error, return_error_param,
3433 (u64)tr->data_size, (u64)tr->offsets_size,
3434 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003435
3436 {
3437 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09003438
Todd Kjose598d172017-03-22 17:19:52 -07003439 e->return_error = return_error;
3440 e->return_error_param = return_error_param;
3441 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003442 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3443 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07003444 /*
3445 * write barrier to synchronize with initialization
3446 * of log entry
3447 */
3448 smp_wmb();
3449 WRITE_ONCE(e->debug_id_done, t_debug_id);
3450 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003451 }
3452
Todd Kjos858b8da2017-04-21 17:35:12 -07003453 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003454 if (in_reply_to) {
Martijn Coenenecd972d2017-05-26 10:48:56 -07003455 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos858b8da2017-04-21 17:35:12 -07003456 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Martijn Coenen1af61802017-10-19 15:04:46 +02003457 binder_enqueue_thread_work(thread, &thread->return_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003458 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07003459 } else {
3460 thread->return_error.cmd = return_error;
Martijn Coenen1af61802017-10-19 15:04:46 +02003461 binder_enqueue_thread_work(thread, &thread->return_error.work);
Todd Kjos858b8da2017-04-21 17:35:12 -07003462 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003463}
3464
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003465static int binder_thread_write(struct binder_proc *proc,
3466 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003467 binder_uintptr_t binder_buffer, size_t size,
3468 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003469{
3470 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003471 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003472 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003473 void __user *ptr = buffer + *consumed;
3474 void __user *end = buffer + size;
3475
Todd Kjos858b8da2017-04-21 17:35:12 -07003476 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003477 int ret;
3478
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003479 if (get_user(cmd, (uint32_t __user *)ptr))
3480 return -EFAULT;
3481 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003482 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003483 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003484 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3485 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3486 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003487 }
3488 switch (cmd) {
3489 case BC_INCREFS:
3490 case BC_ACQUIRE:
3491 case BC_RELEASE:
3492 case BC_DECREFS: {
3493 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003494 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003495 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3496 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3497 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003498
3499 if (get_user(target, (uint32_t __user *)ptr))
3500 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003501
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003502 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003503 ret = -1;
3504 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003505 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003506 mutex_lock(&context->context_mgr_node_lock);
3507 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003508 if (ctx_mgr_node)
3509 ret = binder_inc_ref_for_node(
3510 proc, ctx_mgr_node,
3511 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003512 mutex_unlock(&context->context_mgr_node_lock);
3513 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003514 if (ret)
3515 ret = binder_update_ref_for_handle(
3516 proc, target, increment, strong,
3517 &rdata);
3518 if (!ret && rdata.desc != target) {
3519 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3520 proc->pid, thread->pid,
3521 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003522 }
3523 switch (cmd) {
3524 case BC_INCREFS:
3525 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003526 break;
3527 case BC_ACQUIRE:
3528 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003529 break;
3530 case BC_RELEASE:
3531 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003532 break;
3533 case BC_DECREFS:
3534 default:
3535 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003536 break;
3537 }
3538 if (ret) {
3539 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3540 proc->pid, thread->pid, debug_string,
3541 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003542 break;
3543 }
3544 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003545 "%d:%d %s ref %d desc %d s %d w %d\n",
3546 proc->pid, thread->pid, debug_string,
3547 rdata.debug_id, rdata.desc, rdata.strong,
3548 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003549 break;
3550 }
3551 case BC_INCREFS_DONE:
3552 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003553 binder_uintptr_t node_ptr;
3554 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003555 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003556 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003557
Arve Hjønnevågda498892014-02-21 14:40:26 -08003558 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003559 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003560 ptr += sizeof(binder_uintptr_t);
3561 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003562 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003563 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003564 node = binder_get_node(proc, node_ptr);
3565 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003566 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003567 proc->pid, thread->pid,
3568 cmd == BC_INCREFS_DONE ?
3569 "BC_INCREFS_DONE" :
3570 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003571 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003572 break;
3573 }
3574 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003575 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003576 proc->pid, thread->pid,
3577 cmd == BC_INCREFS_DONE ?
3578 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003579 (u64)node_ptr, node->debug_id,
3580 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003581 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003582 break;
3583 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003584 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003585 if (cmd == BC_ACQUIRE_DONE) {
3586 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303587 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003588 proc->pid, thread->pid,
3589 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003590 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003591 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003592 break;
3593 }
3594 node->pending_strong_ref = 0;
3595 } else {
3596 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303597 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003598 proc->pid, thread->pid,
3599 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003600 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003601 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003602 break;
3603 }
3604 node->pending_weak_ref = 0;
3605 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003606 free_node = binder_dec_node_nilocked(node,
3607 cmd == BC_ACQUIRE_DONE, 0);
3608 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003609 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003610 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003611 proc->pid, thread->pid,
3612 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003613 node->debug_id, node->local_strong_refs,
3614 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003615 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003616 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003617 break;
3618 }
3619 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303620 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003621 return -EINVAL;
3622 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303623 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003624 return -EINVAL;
3625
3626 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003627 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003628 struct binder_buffer *buffer;
3629
Arve Hjønnevågda498892014-02-21 14:40:26 -08003630 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003631 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003632 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003633
Todd Kjos076072a2017-04-21 14:32:11 -07003634 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3635 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003636 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003637 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3638 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003639 break;
3640 }
3641 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003642 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3643 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003644 break;
3645 }
3646 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003647 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3648 proc->pid, thread->pid, (u64)data_ptr,
3649 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003650 buffer->transaction ? "active" : "finished");
3651
3652 if (buffer->transaction) {
3653 buffer->transaction->buffer = NULL;
3654 buffer->transaction = NULL;
3655 }
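			/*
			 * Freeing an async buffer completes that oneway
			 * transaction for the node: move the next queued
			 * async work (if any) to the proc todo list and wake
			 * a thread, otherwise clear has_async_transaction.
			 */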
3656 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003657 struct binder_node *buf_node;
3658 struct binder_work *w;
3659
3660 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003661 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003662 BUG_ON(!buf_node->has_async_transaction);
3663 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003664 w = binder_dequeue_work_head_ilocked(
3665 &buf_node->async_todo);
Martijn Coenen4501c042017-08-10 13:56:16 +02003666 if (!w) {
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -06003667 buf_node->has_async_transaction = false;
Martijn Coenen4501c042017-08-10 13:56:16 +02003668 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003669 binder_enqueue_work_ilocked(
Martijn Coenen4501c042017-08-10 13:56:16 +02003670 w, &proc->todo);
3671 binder_wakeup_proc_ilocked(proc);
3672 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003673 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003674 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003675 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003676 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003677 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003678 break;
3679 }
3680
Martijn Coenen5a6da532016-09-30 14:10:07 +02003681 case BC_TRANSACTION_SG:
3682 case BC_REPLY_SG: {
3683 struct binder_transaction_data_sg tr;
3684
3685 if (copy_from_user(&tr, ptr, sizeof(tr)))
3686 return -EFAULT;
3687 ptr += sizeof(tr);
3688 binder_transaction(proc, thread, &tr.transaction_data,
3689 cmd == BC_REPLY_SG, tr.buffers_size);
3690 break;
3691 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003692 case BC_TRANSACTION:
3693 case BC_REPLY: {
3694 struct binder_transaction_data tr;
3695
3696 if (copy_from_user(&tr, ptr, sizeof(tr)))
3697 return -EFAULT;
3698 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003699 binder_transaction(proc, thread, &tr,
3700 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003701 break;
3702 }
3703
3704 case BC_REGISTER_LOOPER:
3705 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303706 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003707 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003708 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003709 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3710 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303711 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003712 proc->pid, thread->pid);
3713 } else if (proc->requested_threads == 0) {
3714 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303715 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003716 proc->pid, thread->pid);
3717 } else {
3718 proc->requested_threads--;
3719 proc->requested_threads_started++;
3720 }
3721 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003722 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003723 break;
3724 case BC_ENTER_LOOPER:
3725 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303726 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003727 proc->pid, thread->pid);
3728 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3729 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303730 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003731 proc->pid, thread->pid);
3732 }
3733 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3734 break;
3735 case BC_EXIT_LOOPER:
3736 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303737 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003738 proc->pid, thread->pid);
3739 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3740 break;
3741
3742 case BC_REQUEST_DEATH_NOTIFICATION:
3743 case BC_CLEAR_DEATH_NOTIFICATION: {
3744 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003745 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003746 struct binder_ref *ref;
Todd Kjos5346bf32016-10-20 16:43:34 -07003747 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003748
3749 if (get_user(target, (uint32_t __user *)ptr))
3750 return -EFAULT;
3751 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003752 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003753 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003754 ptr += sizeof(binder_uintptr_t);
Todd Kjos5346bf32016-10-20 16:43:34 -07003755 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3756 /*
3757 * Allocate memory for death notification
3758 * before taking lock
3759 */
3760 death = kzalloc(sizeof(*death), GFP_KERNEL);
3761 if (death == NULL) {
3762 WARN_ON(thread->return_error.cmd !=
3763 BR_OK);
3764 thread->return_error.cmd = BR_ERROR;
Martijn Coenen1af61802017-10-19 15:04:46 +02003765 binder_enqueue_thread_work(
3766 thread,
3767 &thread->return_error.work);
Todd Kjos5346bf32016-10-20 16:43:34 -07003768 binder_debug(
3769 BINDER_DEBUG_FAILED_TRANSACTION,
3770 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3771 proc->pid, thread->pid);
3772 break;
3773 }
3774 }
3775 binder_proc_lock(proc);
3776 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003777 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303778 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003779 proc->pid, thread->pid,
3780 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3781 "BC_REQUEST_DEATH_NOTIFICATION" :
3782 "BC_CLEAR_DEATH_NOTIFICATION",
3783 target);
Todd Kjos5346bf32016-10-20 16:43:34 -07003784 binder_proc_unlock(proc);
3785 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003786 break;
3787 }
3788
3789 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003790 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003791 proc->pid, thread->pid,
3792 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3793 "BC_REQUEST_DEATH_NOTIFICATION" :
3794 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003795 (u64)cookie, ref->data.debug_id,
3796 ref->data.desc, ref->data.strong,
3797 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003798
Martijn Coenenf9eac642017-05-22 11:26:23 -07003799 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003800 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3801 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303802 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003803 proc->pid, thread->pid);
Martijn Coenenf9eac642017-05-22 11:26:23 -07003804 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003805 binder_proc_unlock(proc);
3806 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003807 break;
3808 }
3809 binder_stats_created(BINDER_STAT_DEATH);
3810 INIT_LIST_HEAD(&death->work.entry);
3811 death->cookie = cookie;
3812 ref->death = death;
3813 if (ref->node->proc == NULL) {
3814 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Martijn Coenen3bdbe4c2017-08-10 13:50:52 +02003815
3816 binder_inner_proc_lock(proc);
3817 binder_enqueue_work_ilocked(
3818 &ref->death->work, &proc->todo);
3819 binder_wakeup_proc_ilocked(proc);
3820 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003821 }
3822 } else {
3823 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303824 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003825 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003826 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003827 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003828 break;
3829 }
3830 death = ref->death;
3831 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003832 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003833 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003834 (u64)death->cookie,
3835 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003836 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003837 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003838 break;
3839 }
3840 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003841 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003842 if (list_empty(&death->work.entry)) {
3843 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003844 if (thread->looper &
3845 (BINDER_LOOPER_STATE_REGISTERED |
3846 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003847 binder_enqueue_thread_work_ilocked(
3848 thread,
3849 &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003850 else {
3851 binder_enqueue_work_ilocked(
3852 &death->work,
3853 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003854 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003855 proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003856 }
3857 } else {
3858 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3859 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3860 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003861 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003862 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07003863 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003864 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003865 } break;
3866 case BC_DEAD_BINDER_DONE: {
3867 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003868 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003869 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003870
Arve Hjønnevågda498892014-02-21 14:40:26 -08003871 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003872 return -EFAULT;
3873
Lisa Du7a64cd82016-02-17 09:32:52 +08003874 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003875 binder_inner_proc_lock(proc);
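			/* find the delivered death notification whose cookie matches */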
3876 list_for_each_entry(w, &proc->delivered_death,
3877 entry) {
3878 struct binder_ref_death *tmp_death =
3879 container_of(w,
3880 struct binder_ref_death,
3881 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003882
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003883 if (tmp_death->cookie == cookie) {
3884 death = tmp_death;
3885 break;
3886 }
3887 }
3888 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Todd Kjosf540ce02018-02-07 13:57:37 -08003889 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003890 proc->pid, thread->pid, (u64)cookie,
3891 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003892 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003893 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3894 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003895 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003896 break;
3897 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003898 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003899 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3900 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003901 if (thread->looper &
3902 (BINDER_LOOPER_STATE_REGISTERED |
3903 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003904 binder_enqueue_thread_work_ilocked(
3905 thread, &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003906 else {
3907 binder_enqueue_work_ilocked(
3908 &death->work,
3909 &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07003910 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003911 }
3912 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003913 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003914 } break;
3915
3916 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303917 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003918 proc->pid, thread->pid, cmd);
3919 return -EINVAL;
3920 }
3921 *consumed = ptr - buffer;
3922 }
3923 return 0;
3924}
3925
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003926static void binder_stat_br(struct binder_proc *proc,
3927 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003928{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003929 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003930 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003931 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3932 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3933 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003934 }
3935}
3936
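/*
 * Write a node reference-count command (cmd, node ptr, node cookie) into the
 * userspace read buffer at *ptrp and advance *ptrp past what was written.
 */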
Todd Kjos60792612017-05-24 10:51:01 -07003937static int binder_put_node_cmd(struct binder_proc *proc,
3938 struct binder_thread *thread,
3939 void __user **ptrp,
3940 binder_uintptr_t node_ptr,
3941 binder_uintptr_t node_cookie,
3942 int node_debug_id,
3943 uint32_t cmd, const char *cmd_name)
3944{
3945 void __user *ptr = *ptrp;
3946
3947 if (put_user(cmd, (uint32_t __user *)ptr))
3948 return -EFAULT;
3949 ptr += sizeof(uint32_t);
3950
3951 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3952 return -EFAULT;
3953 ptr += sizeof(binder_uintptr_t);
3954
3955 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3956 return -EFAULT;
3957 ptr += sizeof(binder_uintptr_t);
3958
3959 binder_stat_br(proc, thread, cmd);
3960 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3961 proc->pid, thread->pid, cmd_name, node_debug_id,
3962 (u64)node_ptr, (u64)node_cookie);
3963
3964 *ptrp = ptr;
3965 return 0;
3966}
3967
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003968static int binder_wait_for_work(struct binder_thread *thread,
3969 bool do_proc_work)
3970{
3971 DEFINE_WAIT(wait);
3972 struct binder_proc *proc = thread->proc;
3973 int ret = 0;
3974
3975 freezer_do_not_count();
3976 binder_inner_proc_lock(proc);
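	/*
	 * Sleep until this thread (or, when do_proc_work is set, the whole
	 * proc) has work to do, or until a signal is pending.
	 */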
3977 for (;;) {
3978 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3979 if (binder_has_work_ilocked(thread, do_proc_work))
3980 break;
3981 if (do_proc_work)
3982 list_add(&thread->waiting_thread_node,
3983 &proc->waiting_threads);
3984 binder_inner_proc_unlock(proc);
3985 schedule();
3986 binder_inner_proc_lock(proc);
3987 list_del_init(&thread->waiting_thread_node);
3988 if (signal_pending(current)) {
3989 ret = -ERESTARTSYS;
3990 break;
3991 }
3992 }
3993 finish_wait(&thread->wait, &wait);
3994 binder_inner_proc_unlock(proc);
3995 freezer_count();
3996
3997 return ret;
3998}
3999
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004000static int binder_thread_read(struct binder_proc *proc,
4001 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004002 binder_uintptr_t binder_buffer, size_t size,
4003 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004004{
Arve Hjønnevågda498892014-02-21 14:40:26 -08004005 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004006 void __user *ptr = buffer + *consumed;
4007 void __user *end = buffer + size;
4008
4009 int ret = 0;
4010 int wait_for_proc_work;
4011
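	/* every fresh read buffer starts with a BR_NOOP */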
4012 if (*consumed == 0) {
4013 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4014 return -EFAULT;
4015 ptr += sizeof(uint32_t);
4016 }
4017
4018retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07004019 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004020 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07004021 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004022
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004023 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004024
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004025 trace_binder_wait_for_work(wait_for_proc_work,
4026 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004027 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004028 if (wait_for_proc_work) {
4029 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4030 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304031 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004032 proc->pid, thread->pid, thread->looper);
4033 wait_event_interruptible(binder_user_error_wait,
4034 binder_stop_on_user_error < 2);
4035 }
Martijn Coenenecd972d2017-05-26 10:48:56 -07004036 binder_restore_priority(current, proc->default_priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004037 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004038
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004039 if (non_block) {
4040 if (!binder_has_work(thread, wait_for_proc_work))
4041 ret = -EAGAIN;
4042 } else {
4043 ret = binder_wait_for_work(thread, wait_for_proc_work);
4044 }
4045
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004046 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4047
4048 if (ret)
4049 return ret;
4050
4051 while (1) {
4052 uint32_t cmd;
4053 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004054 struct binder_work *w = NULL;
4055 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004056 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07004057 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004058
Todd Kjose7f23ed2017-03-21 13:06:01 -07004059 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004060 if (!binder_worklist_empty_ilocked(&thread->todo))
4061 list = &thread->todo;
4062 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4063 wait_for_proc_work)
4064 list = &proc->todo;
4065 else {
4066 binder_inner_proc_unlock(proc);
4067
Dmitry Voytik395262a2014-09-08 18:16:34 +04004068 /* no data added */
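			/* (ptr - buffer == 4: only the initial BR_NOOP was written) */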
Todd Kjos6798e6d2017-01-06 14:19:25 -08004069 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004070 goto retry;
4071 break;
4072 }
4073
Todd Kjose7f23ed2017-03-21 13:06:01 -07004074 if (end - ptr < sizeof(tr) + 4) {
4075 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004076 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004077 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004078 w = binder_dequeue_work_head_ilocked(list);
Martijn Coenen1af61802017-10-19 15:04:46 +02004079 if (binder_worklist_empty_ilocked(&thread->todo))
4080 thread->process_todo = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004081
4082 switch (w->type) {
4083 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004084 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004085 t = container_of(w, struct binder_transaction, work);
4086 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004087 case BINDER_WORK_RETURN_ERROR: {
4088 struct binder_error *e = container_of(
4089 w, struct binder_error, work);
4090
4091 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004092 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07004093 if (put_user(e->cmd, (uint32_t __user *)ptr))
4094 return -EFAULT;
宋金时e1b1a8b2018-05-10 02:05:03 +00004095 cmd = e->cmd;
Todd Kjos858b8da2017-04-21 17:35:12 -07004096 e->cmd = BR_OK;
4097 ptr += sizeof(uint32_t);
4098
4099 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07004100 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004101 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004102 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004103 cmd = BR_TRANSACTION_COMPLETE;
4104 if (put_user(cmd, (uint32_t __user *)ptr))
4105 return -EFAULT;
4106 ptr += sizeof(uint32_t);
4107
4108 binder_stat_br(proc, thread, cmd);
4109 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304110 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004111 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004112 kfree(w);
4113 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4114 } break;
4115 case BINDER_WORK_NODE: {
4116 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07004117 int strong, weak;
4118 binder_uintptr_t node_ptr = node->ptr;
4119 binder_uintptr_t node_cookie = node->cookie;
4120 int node_debug_id = node->debug_id;
4121 int has_weak_ref;
4122 int has_strong_ref;
4123 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09004124
Todd Kjos60792612017-05-24 10:51:01 -07004125 BUG_ON(proc != node->proc);
4126 strong = node->internal_strong_refs ||
4127 node->local_strong_refs;
4128 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07004129 node->local_weak_refs ||
4130 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07004131 has_strong_ref = node->has_strong_ref;
4132 has_weak_ref = node->has_weak_ref;
4133
4134 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004135 node->has_weak_ref = 1;
4136 node->pending_weak_ref = 1;
4137 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004138 }
4139 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004140 node->has_strong_ref = 1;
4141 node->pending_strong_ref = 1;
4142 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004143 }
4144 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004145 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004146 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004147 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004148 if (!weak && !strong) {
4149 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4150 "%d:%d node %d u%016llx c%016llx deleted\n",
4151 proc->pid, thread->pid,
4152 node_debug_id,
4153 (u64)node_ptr,
4154 (u64)node_cookie);
4155 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004156 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004157 binder_node_lock(node);
4158 /*
4159 * Acquire the node lock before freeing the
4160 * node to serialize with other threads that
4161 * may have been holding the node lock while
4162 * decrementing this node (avoids race where
4163 * this thread frees while the other thread
4164 * is unlocking the node after the final
4165 * decrement)
4166 */
4167 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004168 binder_free_node(node);
4169 } else
4170 binder_inner_proc_unlock(proc);
4171
Todd Kjos60792612017-05-24 10:51:01 -07004172 if (weak && !has_weak_ref)
4173 ret = binder_put_node_cmd(
4174 proc, thread, &ptr, node_ptr,
4175 node_cookie, node_debug_id,
4176 BR_INCREFS, "BR_INCREFS");
4177 if (!ret && strong && !has_strong_ref)
4178 ret = binder_put_node_cmd(
4179 proc, thread, &ptr, node_ptr,
4180 node_cookie, node_debug_id,
4181 BR_ACQUIRE, "BR_ACQUIRE");
4182 if (!ret && !strong && has_strong_ref)
4183 ret = binder_put_node_cmd(
4184 proc, thread, &ptr, node_ptr,
4185 node_cookie, node_debug_id,
4186 BR_RELEASE, "BR_RELEASE");
4187 if (!ret && !weak && has_weak_ref)
4188 ret = binder_put_node_cmd(
4189 proc, thread, &ptr, node_ptr,
4190 node_cookie, node_debug_id,
4191 BR_DECREFS, "BR_DECREFS");
4192 if (orig_ptr == ptr)
4193 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4194 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4195 proc->pid, thread->pid,
4196 node_debug_id,
4197 (u64)node_ptr,
4198 (u64)node_cookie);
4199 if (ret)
4200 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004201 } break;
4202 case BINDER_WORK_DEAD_BINDER:
4203 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4204 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4205 struct binder_ref_death *death;
4206 uint32_t cmd;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004207 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004208
4209 death = container_of(w, struct binder_ref_death, work);
4210 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4211 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4212 else
4213 cmd = BR_DEAD_BINDER;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004214 cookie = death->cookie;
4215
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004216 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004217 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004218 proc->pid, thread->pid,
4219 cmd == BR_DEAD_BINDER ?
4220 "BR_DEAD_BINDER" :
4221 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Martijn Coenenf9eac642017-05-22 11:26:23 -07004222 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004223 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Martijn Coenenf9eac642017-05-22 11:26:23 -07004224 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004225 kfree(death);
4226 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004227 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004228 binder_enqueue_work_ilocked(
4229 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004230 binder_inner_proc_unlock(proc);
4231 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07004232 if (put_user(cmd, (uint32_t __user *)ptr))
4233 return -EFAULT;
4234 ptr += sizeof(uint32_t);
4235 if (put_user(cookie,
4236 (binder_uintptr_t __user *)ptr))
4237 return -EFAULT;
4238 ptr += sizeof(binder_uintptr_t);
4239 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004240 if (cmd == BR_DEAD_BINDER)
4241 goto done; /* DEAD_BINDER notifications can cause transactions */
4242 } break;
4243 }
4244
4245 if (!t)
4246 continue;
4247
4248 BUG_ON(t->buffer == NULL);
4249 if (t->buffer->target_node) {
4250 struct binder_node *target_node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004251 struct binder_priority node_prio;
Seunghun Lee10f62862014-05-01 01:30:23 +09004252
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004253 tr.target.ptr = target_node->ptr;
4254 tr.cookie = target_node->cookie;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004255 node_prio.sched_policy = target_node->sched_policy;
4256 node_prio.prio = target_node->min_priority;
Martijn Coenenc46810c2017-06-23 10:13:43 -07004257 binder_transaction_priority(current, t, node_prio,
4258 target_node->inherit_rt);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004259 cmd = BR_TRANSACTION;
4260 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08004261 tr.target.ptr = 0;
4262 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004263 cmd = BR_REPLY;
4264 }
4265 tr.code = t->code;
4266 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06004267 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004268
Todd Kjos2f993e22017-05-12 14:42:55 -07004269 t_from = binder_get_txn_from(t);
4270 if (t_from) {
4271 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09004272
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004273 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08004274 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004275 } else {
4276 tr.sender_pid = 0;
4277 }
4278
4279 tr.data_size = t->buffer->data_size;
4280 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07004281 tr.data.ptr.buffer = (binder_uintptr_t)
4282 ((uintptr_t)t->buffer->data +
4283 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004284 tr.data.ptr.offsets = tr.data.ptr.buffer +
4285 ALIGN(t->buffer->data_size,
4286 sizeof(void *));
4287
Todd Kjos2f993e22017-05-12 14:42:55 -07004288 if (put_user(cmd, (uint32_t __user *)ptr)) {
4289 if (t_from)
4290 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004291
4292 binder_cleanup_transaction(t, "put_user failed",
4293 BR_FAILED_REPLY);
4294
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004295 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004296 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004297 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07004298 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4299 if (t_from)
4300 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004301
4302 binder_cleanup_transaction(t, "copy_to_user failed",
4303 BR_FAILED_REPLY);
4304
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004305 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004306 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004307 ptr += sizeof(tr);
4308
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004309 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004310 binder_stat_br(proc, thread, cmd);
4311 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004312 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004313 proc->pid, thread->pid,
4314 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4315 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07004316 t->debug_id, t_from ? t_from->proc->pid : 0,
4317 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004318 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004319 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004320
Todd Kjos2f993e22017-05-12 14:42:55 -07004321 if (t_from)
4322 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004323 t->buffer->allow_user_free = 1;
4324 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07004325 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004326 t->to_parent = thread->transaction_stack;
4327 t->to_thread = thread;
4328 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07004329 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004330 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07004331 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004332 }
4333 break;
4334 }
4335
4336done:
4337
4338 *consumed = ptr - buffer;
Todd Kjosd600e902017-05-25 17:35:02 -07004339 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004340 if (proc->requested_threads == 0 &&
4341 list_empty(&thread->proc->waiting_threads) &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004342 proc->requested_threads_started < proc->max_threads &&
4343 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4344 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4345				 /* spawn a new thread if we leave this out */) {
4346 proc->requested_threads++;
Todd Kjosd600e902017-05-25 17:35:02 -07004347 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004348 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304349 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004350 proc->pid, thread->pid);
4351 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4352 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07004353 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Todd Kjosd600e902017-05-25 17:35:02 -07004354 } else
4355 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004356 return 0;
4357}
4358
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004359static void binder_release_work(struct binder_proc *proc,
4360 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004361{
4362 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09004363
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004364 while (1) {
4365 w = binder_dequeue_work_head(proc, list);
4366 if (!w)
4367 return;
4368
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004369 switch (w->type) {
4370 case BINDER_WORK_TRANSACTION: {
4371 struct binder_transaction *t;
4372
4373 t = container_of(w, struct binder_transaction, work);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004374
4375 binder_cleanup_transaction(t, "process died.",
4376 BR_DEAD_REPLY);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004377 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004378 case BINDER_WORK_RETURN_ERROR: {
4379 struct binder_error *e = container_of(
4380 w, struct binder_error, work);
4381
4382 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4383 "undelivered TRANSACTION_ERROR: %u\n",
4384 e->cmd);
4385 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004386 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004387 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304388 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004389 kfree(w);
4390 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4391 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004392 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4393 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4394 struct binder_ref_death *death;
4395
4396 death = container_of(w, struct binder_ref_death, work);
4397 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004398 "undelivered death notification, %016llx\n",
4399 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004400 kfree(death);
4401 binder_stats_deleted(BINDER_STAT_DEATH);
4402 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004403 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304404 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004405 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004406 break;
4407 }
4408 }
4409
4410}
4411
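/*
 * Look up current's binder_thread in proc->threads (an rbtree keyed by pid).
 * If it is not found and new_thread was supplied, link new_thread into the
 * tree and return it; otherwise return NULL.
 */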
Todd Kjosb4827902017-05-25 15:52:17 -07004412static struct binder_thread *binder_get_thread_ilocked(
4413 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004414{
4415 struct binder_thread *thread = NULL;
4416 struct rb_node *parent = NULL;
4417 struct rb_node **p = &proc->threads.rb_node;
4418
4419 while (*p) {
4420 parent = *p;
4421 thread = rb_entry(parent, struct binder_thread, rb_node);
4422
4423 if (current->pid < thread->pid)
4424 p = &(*p)->rb_left;
4425 else if (current->pid > thread->pid)
4426 p = &(*p)->rb_right;
4427 else
Todd Kjosb4827902017-05-25 15:52:17 -07004428 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004429 }
Todd Kjosb4827902017-05-25 15:52:17 -07004430 if (!new_thread)
4431 return NULL;
4432 thread = new_thread;
4433 binder_stats_created(BINDER_STAT_THREAD);
4434 thread->proc = proc;
4435 thread->pid = current->pid;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004436 get_task_struct(current);
4437 thread->task = current;
Todd Kjosb4827902017-05-25 15:52:17 -07004438 atomic_set(&thread->tmp_ref, 0);
4439 init_waitqueue_head(&thread->wait);
4440 INIT_LIST_HEAD(&thread->todo);
4441 rb_link_node(&thread->rb_node, parent, p);
4442 rb_insert_color(&thread->rb_node, &proc->threads);
4443 thread->looper_need_return = true;
4444 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4445 thread->return_error.cmd = BR_OK;
4446 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4447 thread->reply_error.cmd = BR_OK;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004448 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004449 return thread;
4450}
4451
4452static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4453{
4454 struct binder_thread *thread;
4455 struct binder_thread *new_thread;
4456
4457 binder_inner_proc_lock(proc);
4458 thread = binder_get_thread_ilocked(proc, NULL);
4459 binder_inner_proc_unlock(proc);
4460 if (!thread) {
4461 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4462 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004463 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07004464 binder_inner_proc_lock(proc);
4465 thread = binder_get_thread_ilocked(proc, new_thread);
4466 binder_inner_proc_unlock(proc);
4467 if (thread != new_thread)
4468 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004469 }
4470 return thread;
4471}
4472
Todd Kjos2f993e22017-05-12 14:42:55 -07004473static void binder_free_proc(struct binder_proc *proc)
4474{
4475 BUG_ON(!list_empty(&proc->todo));
4476 BUG_ON(!list_empty(&proc->delivered_death));
4477 binder_alloc_deferred_release(&proc->alloc);
4478 put_task_struct(proc->tsk);
4479 binder_stats_deleted(BINDER_STAT_PROC);
4480 kfree(proc);
4481}
4482
4483static void binder_free_thread(struct binder_thread *thread)
4484{
4485 BUG_ON(!list_empty(&thread->todo));
4486 binder_stats_deleted(BINDER_STAT_THREAD);
4487 binder_proc_dec_tmpref(thread->proc);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004488 put_task_struct(thread->task);
Todd Kjos2f993e22017-05-12 14:42:55 -07004489 kfree(thread);
4490}
4491
4492static int binder_thread_release(struct binder_proc *proc,
4493 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004494{
4495 struct binder_transaction *t;
4496 struct binder_transaction *send_reply = NULL;
4497 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07004498 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004499
Todd Kjosb4827902017-05-25 15:52:17 -07004500 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004501 /*
4502 * take a ref on the proc so it survives
4503 * after we remove this thread from proc->threads.
4504 * The corresponding dec is when we actually
4505 * free the thread in binder_free_thread()
4506 */
4507 proc->tmp_ref++;
4508 /*
4509 * take a ref on this thread to ensure it
4510 * survives while we are releasing it
4511 */
4512 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004513 rb_erase(&thread->rb_node, &proc->threads);
4514 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07004515 if (t) {
4516 spin_lock(&t->lock);
4517 if (t->to_thread == thread)
4518 send_reply = t;
4519 }
4520 thread->is_dead = true;
4521
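	/*
	 * Unwind the transaction stack: drop this thread from transactions
	 * that target it (via to_parent) and from transactions it sent
	 * (via from_parent).
	 */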
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004522 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07004523 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004524 active_transactions++;
4525 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304526 "release %d:%d transaction %d %s, still active\n",
4527 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004528 t->debug_id,
4529 (t->to_thread == thread) ? "in" : "out");
4530
4531 if (t->to_thread == thread) {
4532 t->to_proc = NULL;
4533 t->to_thread = NULL;
4534 if (t->buffer) {
4535 t->buffer->transaction = NULL;
4536 t->buffer = NULL;
4537 }
4538 t = t->to_parent;
4539 } else if (t->from == thread) {
4540 t->from = NULL;
4541 t = t->from_parent;
4542 } else
4543 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07004544 spin_unlock(&last_t->lock);
4545 if (t)
4546 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004547 }
Martijn Coenen550c01d2018-01-05 11:27:07 +01004548
4549 /*
4550 * If this thread used poll, make sure we remove the waitqueue
4551 * from any epoll data structures holding it with POLLFREE.
4552 * waitqueue_active() is safe to use here because we're holding
4553 * the inner lock.
4554 */
4555 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4556 waitqueue_active(&thread->wait)) {
4557 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4558 }
4559
Todd Kjosb4827902017-05-25 15:52:17 -07004560 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004561
Martijn Coenen72766d72018-02-16 09:47:15 +01004562 /*
4563 * This is needed to avoid races between wake_up_poll() above and
4564	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
4565 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4566 * lock, so we can be sure it's done after calling synchronize_rcu().
4567 */
4568 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4569 synchronize_rcu();
4570
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004571 if (send_reply)
4572 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004573 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004574 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004575 return active_transactions;
4576}
4577
4578static unsigned int binder_poll(struct file *filp,
4579 struct poll_table_struct *wait)
4580{
4581 struct binder_proc *proc = filp->private_data;
4582 struct binder_thread *thread = NULL;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004583 bool wait_for_proc_work;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004584
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004585 thread = binder_get_thread(proc);
Greg Kroah-Hartman6e463bb2018-02-28 17:17:14 +01004586 if (!thread)
Eric Biggers4be5a282018-01-30 23:11:24 -08004587 return POLLERR;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004588
Martijn Coenen995a36e2017-06-02 13:36:52 -07004589 binder_inner_proc_lock(thread->proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004590 thread->looper |= BINDER_LOOPER_STATE_POLL;
4591 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4592
Martijn Coenen995a36e2017-06-02 13:36:52 -07004593 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004594
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004595 poll_wait(filp, &thread->wait, wait);
4596
Martijn Coenen47810932017-08-10 12:32:00 +02004597 if (binder_has_work(thread, wait_for_proc_work))
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004598 return POLLIN;
4599
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004600 return 0;
4601}
4602
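/*
 * Illustrative userspace call sequence for this path (not part of the
 * driver; binder_fd, cmds and readbuf are placeholders, field names as
 * used by the code below):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size   = cmds_len,
 *		.read_buffer  = (binder_uintptr_t)readbuf,
 *		.read_size    = sizeof(readbuf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */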
Tair Rzayev78260ac2014-06-03 22:27:21 +03004603static int binder_ioctl_write_read(struct file *filp,
4604 unsigned int cmd, unsigned long arg,
4605 struct binder_thread *thread)
4606{
4607 int ret = 0;
4608 struct binder_proc *proc = filp->private_data;
4609 unsigned int size = _IOC_SIZE(cmd);
4610 void __user *ubuf = (void __user *)arg;
4611 struct binder_write_read bwr;
4612
4613 if (size != sizeof(struct binder_write_read)) {
4614 ret = -EINVAL;
4615 goto out;
4616 }
4617 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4618 ret = -EFAULT;
4619 goto out;
4620 }
4621 binder_debug(BINDER_DEBUG_READ_WRITE,
4622 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4623 proc->pid, thread->pid,
4624 (u64)bwr.write_size, (u64)bwr.write_buffer,
4625 (u64)bwr.read_size, (u64)bwr.read_buffer);
4626
4627 if (bwr.write_size > 0) {
4628 ret = binder_thread_write(proc, thread,
4629 bwr.write_buffer,
4630 bwr.write_size,
4631 &bwr.write_consumed);
4632 trace_binder_write_done(ret);
4633 if (ret < 0) {
4634 bwr.read_consumed = 0;
4635 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4636 ret = -EFAULT;
4637 goto out;
4638 }
4639 }
4640 if (bwr.read_size > 0) {
4641 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4642 bwr.read_size,
4643 &bwr.read_consumed,
4644 filp->f_flags & O_NONBLOCK);
4645 trace_binder_read_done(ret);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004646 binder_inner_proc_lock(proc);
4647 if (!binder_worklist_empty_ilocked(&proc->todo))
Martijn Coenen053be422017-06-06 15:17:46 -07004648 binder_wakeup_proc_ilocked(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004649 binder_inner_proc_unlock(proc);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004650 if (ret < 0) {
4651 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4652 ret = -EFAULT;
4653 goto out;
4654 }
4655 }
4656 binder_debug(BINDER_DEBUG_READ_WRITE,
4657 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4658 proc->pid, thread->pid,
4659 (u64)bwr.write_consumed, (u64)bwr.write_size,
4660 (u64)bwr.read_consumed, (u64)bwr.read_size);
4661 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4662 ret = -EFAULT;
4663 goto out;
4664 }
4665out:
4666 return ret;
4667}
4668
4669static int binder_ioctl_set_ctx_mgr(struct file *filp)
4670{
4671 int ret = 0;
4672 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004673 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004674 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004675 kuid_t curr_euid = current_euid();
4676
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004677 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004678 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004679 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4680 ret = -EBUSY;
4681 goto out;
4682 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004683 ret = security_binder_set_context_mgr(proc->tsk);
4684 if (ret < 0)
4685 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004686 if (uid_valid(context->binder_context_mgr_uid)) {
4687 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004688 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4689 from_kuid(&init_user_ns, curr_euid),
4690 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004691 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004692 ret = -EPERM;
4693 goto out;
4694 }
4695 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004696 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004697 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004698 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004699 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004700 ret = -ENOMEM;
4701 goto out;
4702 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004703 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004704 new_node->local_weak_refs++;
4705 new_node->local_strong_refs++;
4706 new_node->has_strong_ref = 1;
4707 new_node->has_weak_ref = 1;
4708 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004709 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004710 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004711out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004712 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004713 return ret;
4714}
4715
Martijn Coenen1c57ba42018-08-25 13:50:56 -07004716static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4717 struct binder_node_info_for_ref *info)
4718{
4719 struct binder_node *node;
4720 struct binder_context *context = proc->context;
4721 __u32 handle = info->handle;
4722
4723 if (info->strong_count || info->weak_count || info->reserved1 ||
4724 info->reserved2 || info->reserved3) {
4725 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4726 proc->pid);
4727 return -EINVAL;
4728 }
4729
4730 /* This ioctl may only be used by the context manager */
4731 mutex_lock(&context->context_mgr_node_lock);
4732 if (!context->binder_context_mgr_node ||
4733 context->binder_context_mgr_node->proc != proc) {
4734 mutex_unlock(&context->context_mgr_node_lock);
4735 return -EPERM;
4736 }
4737 mutex_unlock(&context->context_mgr_node_lock);
4738
4739 node = binder_get_node_from_ref(proc, handle, true, NULL);
4740 if (!node)
4741 return -EINVAL;
4742
4743 info->strong_count = node->local_strong_refs +
4744 node->internal_strong_refs;
4745 info->weak_count = node->local_weak_refs;
4746
4747 binder_put_node(node);
4748
4749 return 0;
4750}
4751
Colin Cross833babb32017-06-20 13:54:44 -07004752static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4753 struct binder_node_debug_info *info) {
4754 struct rb_node *n;
4755 binder_uintptr_t ptr = info->ptr;
4756
4757 memset(info, 0, sizeof(*info));
4758
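	/* return the first node whose ptr is strictly greater than the one passed in */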
4759 binder_inner_proc_lock(proc);
4760 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4761 struct binder_node *node = rb_entry(n, struct binder_node,
4762 rb_node);
4763 if (node->ptr > ptr) {
4764 info->ptr = node->ptr;
4765 info->cookie = node->cookie;
4766 info->has_strong_ref = node->has_strong_ref;
4767 info->has_weak_ref = node->has_weak_ref;
4768 break;
4769 }
4770 }
4771 binder_inner_proc_unlock(proc);
4772
4773 return 0;
4774}
4775
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004776static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4777{
4778 int ret;
4779 struct binder_proc *proc = filp->private_data;
4780 struct binder_thread *thread;
4781 unsigned int size = _IOC_SIZE(cmd);
4782 void __user *ubuf = (void __user *)arg;
4783
Tair Rzayev78260ac2014-06-03 22:27:21 +03004784 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4785 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004786
Sherry Yang435416b2017-06-22 14:37:45 -07004787 binder_selftest_alloc(&proc->alloc);
4788
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004789 trace_binder_ioctl(cmd, arg);
4790
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004791 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4792 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004793 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004794
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004795 thread = binder_get_thread(proc);
4796 if (thread == NULL) {
4797 ret = -ENOMEM;
4798 goto err;
4799 }
4800
4801 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004802 case BINDER_WRITE_READ:
4803 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4804 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004805 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004806 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004807 case BINDER_SET_MAX_THREADS: {
4808 int max_threads;
4809
4810 if (copy_from_user(&max_threads, ubuf,
4811 sizeof(max_threads))) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004812 ret = -EINVAL;
4813 goto err;
4814 }
Todd Kjosd600e902017-05-25 17:35:02 -07004815 binder_inner_proc_lock(proc);
4816 proc->max_threads = max_threads;
4817 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004818 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004819 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004820 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004821 ret = binder_ioctl_set_ctx_mgr(filp);
4822 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004823 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004824 break;
4825 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304826 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004827 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004828 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004829 thread = NULL;
4830 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004831 case BINDER_VERSION: {
4832 struct binder_version __user *ver = ubuf;
4833
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004834 if (size != sizeof(struct binder_version)) {
4835 ret = -EINVAL;
4836 goto err;
4837 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004838 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4839 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004840 ret = -EINVAL;
4841 goto err;
4842 }
4843 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004844 }
Martijn Coenen1c57ba42018-08-25 13:50:56 -07004845 case BINDER_GET_NODE_INFO_FOR_REF: {
4846 struct binder_node_info_for_ref info;
4847
4848 if (copy_from_user(&info, ubuf, sizeof(info))) {
4849 ret = -EFAULT;
4850 goto err;
4851 }
4852
4853 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4854 if (ret < 0)
4855 goto err;
4856
4857 if (copy_to_user(ubuf, &info, sizeof(info))) {
4858 ret = -EFAULT;
4859 goto err;
4860 }
4861
4862 break;
4863 }
Colin Cross833babb32017-06-20 13:54:44 -07004864 case BINDER_GET_NODE_DEBUG_INFO: {
4865 struct binder_node_debug_info info;
4866
4867 if (copy_from_user(&info, ubuf, sizeof(info))) {
4868 ret = -EFAULT;
4869 goto err;
4870 }
4871
4872 ret = binder_ioctl_get_node_debug_info(proc, &info);
4873 if (ret < 0)
4874 goto err;
4875
4876 if (copy_to_user(ubuf, &info, sizeof(info))) {
4877 ret = -EFAULT;
4878 goto err;
4879 }
4880 break;
4881 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004882 default:
4883 ret = -EINVAL;
4884 goto err;
4885 }
4886 ret = 0;
4887err:
4888 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004889 thread->looper_need_return = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004890 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4891 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304892 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004893err_unlocked:
4894 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004895 return ret;
4896}
4897
4898static void binder_vma_open(struct vm_area_struct *vma)
4899{
4900 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004901
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004902 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304903 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004904 proc->pid, vma->vm_start, vma->vm_end,
4905 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4906 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004907}
4908
4909static void binder_vma_close(struct vm_area_struct *vma)
4910{
4911 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004912
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004913 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304914 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004915 proc->pid, vma->vm_start, vma->vm_end,
4916 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4917 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004918 binder_alloc_vma_close(&proc->alloc);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02004919 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004920}
4921
Vinayak Menonddac7d52014-06-02 18:17:59 +05304922static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4923{
4924 return VM_FAULT_SIGBUS;
4925}
4926
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004927static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004928 .open = binder_vma_open,
4929 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304930 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004931};
4932
Todd Kjosd325d372016-10-10 10:40:53 -07004933static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4934{
4935 int ret;
4936 struct binder_proc *proc = filp->private_data;
4937 const char *failure_string;
4938
4939 if (proc->tsk != current->group_leader)
4940 return -EINVAL;
4941
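	/* cap the mapping, and thus the per-process binder buffer, at 4MB */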
4942 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4943 vma->vm_end = vma->vm_start + SZ_4M;
4944
4945 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4946 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4947 __func__, proc->pid, vma->vm_start, vma->vm_end,
4948 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4949 (unsigned long)pgprot_val(vma->vm_page_prot));
4950
4951 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4952 ret = -EPERM;
4953 failure_string = "bad vm_flags";
4954 goto err_bad_arg;
4955 }
Minchan Kim2cafd5b2018-05-07 23:15:37 +09004956 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4957 vma->vm_flags &= ~VM_MAYWRITE;
4958
Todd Kjosd325d372016-10-10 10:40:53 -07004959 vma->vm_ops = &binder_vm_ops;
4960 vma->vm_private_data = proc;
4961
4962 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02004963 if (ret)
4964 return ret;
Todd Kjosfbb43392017-11-27 09:32:33 -08004965 mutex_lock(&proc->files_lock);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02004966 proc->files = get_files_struct(current);
Todd Kjosfbb43392017-11-27 09:32:33 -08004967 mutex_unlock(&proc->files_lock);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02004968 return 0;
Todd Kjosd325d372016-10-10 10:40:53 -07004969
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004970err_bad_arg:
Elad Wexler6b646402017-12-29 11:03:37 +02004971 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004972 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4973 return ret;
4974}
4975
4976static int binder_open(struct inode *nodp, struct file *filp)
4977{
4978 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004979 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004980
Elad Wexler6b646402017-12-29 11:03:37 +02004981 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004982 current->group_leader->pid, current->pid);
4983
4984 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4985 if (proc == NULL)
4986 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004987 spin_lock_init(&proc->inner_lock);
4988 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004989 get_task_struct(current->group_leader);
4990 proc->tsk = current->group_leader;
Todd Kjosfbb43392017-11-27 09:32:33 -08004991 mutex_init(&proc->files_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004992 INIT_LIST_HEAD(&proc->todo);
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004993 if (binder_supported_policy(current->policy)) {
4994 proc->default_priority.sched_policy = current->policy;
4995 proc->default_priority.prio = current->normal_prio;
4996 } else {
4997 proc->default_priority.sched_policy = SCHED_NORMAL;
4998 proc->default_priority.prio = NICE_TO_PRIO(0);
4999 }
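	/*
	 * Worked example: an ordinary SCHED_NORMAL caller at nice 0 takes the
	 * first branch and records prio == current->normal_prio, i.e. 120; the
	 * else branch pins unsupported policies (e.g. SCHED_DEADLINE) to the
	 * same default, NICE_TO_PRIO(0) == 120.
	 */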
5000
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005001 binder_dev = container_of(filp->private_data, struct binder_device,
5002 miscdev);
5003 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07005004 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005005
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005006 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005007 proc->pid = current->group_leader->pid;
5008 INIT_LIST_HEAD(&proc->delivered_death);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005009 INIT_LIST_HEAD(&proc->waiting_threads);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005010 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005011
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005012 mutex_lock(&binder_procs_lock);
5013 hlist_add_head(&proc->proc_node, &binder_procs);
5014 mutex_unlock(&binder_procs_lock);
5015
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005016 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005017 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09005018
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005019 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005020 /*
5021 * proc debug entries are shared between contexts, so
5022 * this will fail if the process tries to open the driver
5023	 * again with a different context. The printing code will
5024	 * print all contexts that a given PID has anyway, so this
5025 * is not a problem.
5026 */
Harsh Shandilya174562a2017-12-22 19:37:02 +05305027 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005028 binder_debugfs_dir_entry_proc,
5029 (void *)(unsigned long)proc->pid,
5030 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005031 }
5032
5033 return 0;
5034}
5035
5036static int binder_flush(struct file *filp, fl_owner_t id)
5037{
5038 struct binder_proc *proc = filp->private_data;
5039
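	/*
	 * ->flush runs on every close() of a binder fd (including duplicated
	 * descriptors); the deferred work below just nudges threads blocked in
	 * binder_thread_read() so they return to userspace promptly.
	 */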
5040 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5041
5042 return 0;
5043}
5044
5045static void binder_deferred_flush(struct binder_proc *proc)
5046{
5047 struct rb_node *n;
5048 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09005049
Todd Kjosb4827902017-05-25 15:52:17 -07005050 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005051 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5052 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09005053
Todd Kjos6798e6d2017-01-06 14:19:25 -08005054 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005055 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5056 wake_up_interruptible(&thread->wait);
5057 wake_count++;
5058 }
5059 }
Todd Kjosb4827902017-05-25 15:52:17 -07005060 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005061
5062 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5063 "binder_flush: %d woke %d threads\n", proc->pid,
5064 wake_count);
5065}
5066
5067static int binder_release(struct inode *nodp, struct file *filp)
5068{
5069 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09005070
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005071 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005072 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5073
5074 return 0;
5075}
5076
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005077static int binder_node_release(struct binder_node *node, int refs)
5078{
5079 struct binder_ref *ref;
5080 int death = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07005081 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005082
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005083 binder_release_work(proc, &node->async_todo);
Todd Kjose7f23ed2017-03-21 13:06:01 -07005084
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005085 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07005086 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005087 binder_dequeue_work_ilocked(&node->work);
Todd Kjosf22abc72017-05-09 11:08:05 -07005088 /*
5089	 * The caller must have taken a temporary ref on the node.
5090 */
5091 BUG_ON(!node->tmp_refs);
5092 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07005093 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005094 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07005095 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005096
5097 return refs;
5098 }
5099
5100 node->proc = NULL;
5101 node->local_strong_refs = 0;
5102 node->local_weak_refs = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07005103 binder_inner_proc_unlock(proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005104
5105 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005106 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005107 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005108
5109 hlist_for_each_entry(ref, &node->refs, node_entry) {
5110 refs++;
Martijn Coenenf9eac642017-05-22 11:26:23 -07005111 /*
5112 * Need the node lock to synchronize
5113 * with new notification requests and the
5114 * inner lock to synchronize with queued
5115 * death notifications.
5116 */
5117 binder_inner_proc_lock(ref->proc);
5118 if (!ref->death) {
5119 binder_inner_proc_unlock(ref->proc);
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08005120 continue;
Martijn Coenenf9eac642017-05-22 11:26:23 -07005121 }
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005122
5123 death++;
5124
Martijn Coenenf9eac642017-05-22 11:26:23 -07005125 BUG_ON(!list_empty(&ref->death->work.entry));
5126 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5127 binder_enqueue_work_ilocked(&ref->death->work,
5128 &ref->proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07005129 binder_wakeup_proc_ilocked(ref->proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005130 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005131 }
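
	/*
	 * At this point every ref whose owner registered a death notification
	 * has a BINDER_WORK_DEAD_BINDER item queued on its proc->todo; that
	 * owner will read it back as BR_DEAD_BINDER carrying the cookie it
	 * supplied with BC_REQUEST_DEATH_NOTIFICATION.
	 */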
5132
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005133 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5134 "node %d now dead, refs %d, death %d\n",
5135 node->debug_id, refs, death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005136 binder_node_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07005137 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005138
5139 return refs;
5140}
5141
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005142static void binder_deferred_release(struct binder_proc *proc)
5143{
Martijn Coenen0b3311e2016-09-30 15:51:48 +02005144 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005145 struct rb_node *n;
Todd Kjosd325d372016-10-10 10:40:53 -07005146 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005147
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005148 BUG_ON(proc->files);
5149
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005150 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005151 hlist_del(&proc->proc_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005152 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005153
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005154 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02005155 if (context->binder_context_mgr_node &&
5156 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005157 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01005158 "%s: %d context_mgr_node gone\n",
5159 __func__, proc->pid);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02005160 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005161 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005162 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjosb4827902017-05-25 15:52:17 -07005163 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07005164 /*
5165 * Make sure proc stays alive after we
5166 * remove all the threads
5167 */
5168 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005169
Todd Kjos2f993e22017-05-12 14:42:55 -07005170 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005171 threads = 0;
5172 active_transactions = 0;
5173 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005174 struct binder_thread *thread;
5175
5176 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjosb4827902017-05-25 15:52:17 -07005177 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005178 threads++;
Todd Kjos2f993e22017-05-12 14:42:55 -07005179 active_transactions += binder_thread_release(proc, thread);
Todd Kjosb4827902017-05-25 15:52:17 -07005180 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005181 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005182
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005183 nodes = 0;
5184 incoming_refs = 0;
5185 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005186 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005187
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005188 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005189 nodes++;
Todd Kjosf22abc72017-05-09 11:08:05 -07005190 /*
5191 * take a temporary ref on the node before
5192 * calling binder_node_release() which will either
5193 * kfree() the node or call binder_put_node()
5194 */
Todd Kjos425d23f2017-06-12 12:07:26 -07005195 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005196 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjos425d23f2017-06-12 12:07:26 -07005197 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005198 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjos425d23f2017-06-12 12:07:26 -07005199 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005200 }
Todd Kjos425d23f2017-06-12 12:07:26 -07005201 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005202
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005203 outgoing_refs = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07005204 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005205 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005206 struct binder_ref *ref;
5207
5208 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005209 outgoing_refs++;
Todd Kjos5346bf32016-10-20 16:43:34 -07005210 binder_cleanup_ref_olocked(ref);
5211 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07005212 binder_free_ref(ref);
Todd Kjos5346bf32016-10-20 16:43:34 -07005213 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005214 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005215 binder_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005216
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005217 binder_release_work(proc, &proc->todo);
5218 binder_release_work(proc, &proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005219
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005220 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjosd325d372016-10-10 10:40:53 -07005221 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01005222 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjosd325d372016-10-10 10:40:53 -07005223 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005224
Todd Kjos2f993e22017-05-12 14:42:55 -07005225 binder_proc_dec_tmpref(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005226}
5227
5228static void binder_deferred_func(struct work_struct *work)
5229{
5230 struct binder_proc *proc;
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005231 struct files_struct *files;
5232
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005233 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09005234
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005235 do {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005236 mutex_lock(&binder_deferred_lock);
5237 if (!hlist_empty(&binder_deferred_list)) {
5238 proc = hlist_entry(binder_deferred_list.first,
5239 struct binder_proc, deferred_work_node);
5240 hlist_del_init(&proc->deferred_work_node);
5241 defer = proc->deferred_work;
5242 proc->deferred_work = 0;
5243 } else {
5244 proc = NULL;
5245 defer = 0;
5246 }
5247 mutex_unlock(&binder_deferred_lock);
5248
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005249 files = NULL;
5250 if (defer & BINDER_DEFERRED_PUT_FILES) {
Todd Kjosfbb43392017-11-27 09:32:33 -08005251 mutex_lock(&proc->files_lock);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005252 files = proc->files;
5253 if (files)
5254 proc->files = NULL;
Todd Kjosfbb43392017-11-27 09:32:33 -08005255 mutex_unlock(&proc->files_lock);
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005256 }
5257
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005258 if (defer & BINDER_DEFERRED_FLUSH)
5259 binder_deferred_flush(proc);
5260
5261 if (defer & BINDER_DEFERRED_RELEASE)
5262 binder_deferred_release(proc); /* frees proc */
Martijn Coenen6f7e5f92018-06-15 11:53:36 +02005263
5264 if (files)
5265 put_files_struct(files);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005266 } while (proc);
5267}
5268static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5269
5270static void
5271binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5272{
5273 mutex_lock(&binder_deferred_lock);
5274 proc->deferred_work |= defer;
5275 if (hlist_unhashed(&proc->deferred_work_node)) {
5276 hlist_add_head(&proc->deferred_work_node,
5277 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05305278 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005279 }
5280 mutex_unlock(&binder_deferred_lock);
5281}
5282
Todd Kjos6d241a42017-04-21 14:32:11 -07005283static void print_binder_transaction_ilocked(struct seq_file *m,
5284 struct binder_proc *proc,
5285 const char *prefix,
5286 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005287{
Todd Kjos6d241a42017-04-21 14:32:11 -07005288 struct binder_proc *to_proc;
5289 struct binder_buffer *buffer = t->buffer;
5290
Todd Kjos2f993e22017-05-12 14:42:55 -07005291 spin_lock(&t->lock);
Todd Kjos6d241a42017-04-21 14:32:11 -07005292 to_proc = t->to_proc;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005293 seq_printf(m,
Todd Kjosf540ce02018-02-07 13:57:37 -08005294 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005295 prefix, t->debug_id, t,
5296 t->from ? t->from->proc->pid : 0,
5297 t->from ? t->from->pid : 0,
Todd Kjos6d241a42017-04-21 14:32:11 -07005298 to_proc ? to_proc->pid : 0,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005299 t->to_thread ? t->to_thread->pid : 0,
Martijn Coenen57b2ac62017-06-06 17:04:42 -07005300 t->code, t->flags, t->priority.sched_policy,
5301 t->priority.prio, t->need_reply);
Todd Kjos2f993e22017-05-12 14:42:55 -07005302 spin_unlock(&t->lock);
5303
Todd Kjos6d241a42017-04-21 14:32:11 -07005304 if (proc != to_proc) {
5305 /*
5306 * Can only safely deref buffer if we are holding the
5307 * correct proc inner lock for this node
5308 */
5309 seq_puts(m, "\n");
5310 return;
5311 }
5312
5313 if (buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005314 seq_puts(m, " buffer free\n");
5315 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005316 }
Todd Kjos6d241a42017-04-21 14:32:11 -07005317 if (buffer->target_node)
5318 seq_printf(m, " node %d", buffer->target_node->debug_id);
Todd Kjosf540ce02018-02-07 13:57:37 -08005319 seq_printf(m, " size %zd:%zd data %pK\n",
Todd Kjos6d241a42017-04-21 14:32:11 -07005320 buffer->data_size, buffer->offsets_size,
5321 buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005322}
5323
Todd Kjos6d241a42017-04-21 14:32:11 -07005324static void print_binder_work_ilocked(struct seq_file *m,
5325 struct binder_proc *proc,
5326 const char *prefix,
5327 const char *transaction_prefix,
5328 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005329{
5330 struct binder_node *node;
5331 struct binder_transaction *t;
5332
5333 switch (w->type) {
5334 case BINDER_WORK_TRANSACTION:
5335 t = container_of(w, struct binder_transaction, work);
Todd Kjos6d241a42017-04-21 14:32:11 -07005336 print_binder_transaction_ilocked(
5337 m, proc, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005338 break;
Todd Kjos858b8da2017-04-21 17:35:12 -07005339 case BINDER_WORK_RETURN_ERROR: {
5340 struct binder_error *e = container_of(
5341 w, struct binder_error, work);
5342
5343 seq_printf(m, "%stransaction error: %u\n",
5344 prefix, e->cmd);
5345 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005346 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005347 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005348 break;
5349 case BINDER_WORK_NODE:
5350 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08005351 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5352 prefix, node->debug_id,
5353 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005354 break;
5355 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005356 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005357 break;
5358 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005359 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005360 break;
5361 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005362 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005363 break;
5364 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005365 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005366 break;
5367 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005368}
5369
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005370static void print_binder_thread_ilocked(struct seq_file *m,
5371 struct binder_thread *thread,
5372 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005373{
5374 struct binder_transaction *t;
5375 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005376 size_t start_pos = m->count;
5377 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005378
Todd Kjos2f993e22017-05-12 14:42:55 -07005379 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
Todd Kjos6798e6d2017-01-06 14:19:25 -08005380 thread->pid, thread->looper,
Todd Kjos2f993e22017-05-12 14:42:55 -07005381 thread->looper_need_return,
5382 atomic_read(&thread->tmp_ref));
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005383 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005384 t = thread->transaction_stack;
5385 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005386 if (t->from == thread) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005387 print_binder_transaction_ilocked(m, thread->proc,
5388 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005389 t = t->from_parent;
5390 } else if (t->to_thread == thread) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005391 print_binder_transaction_ilocked(m, thread->proc,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005392 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005393 t = t->to_parent;
5394 } else {
Todd Kjos6d241a42017-04-21 14:32:11 -07005395 print_binder_transaction_ilocked(m, thread->proc,
5396 " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005397 t = NULL;
5398 }
5399 }
5400 list_for_each_entry(w, &thread->todo, entry) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005401 print_binder_work_ilocked(m, thread->proc, " ",
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005402 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005403 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005404 if (!print_always && m->count == header_pos)
5405 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005406}
5407
Todd Kjos425d23f2017-06-12 12:07:26 -07005408static void print_binder_node_nilocked(struct seq_file *m,
5409 struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005410{
5411 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005412 struct binder_work *w;
5413 int count;
5414
5415 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08005416 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005417 count++;
5418
Martijn Coenen6aac9792017-06-07 09:29:14 -07005419 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
Arve Hjønnevågda498892014-02-21 14:40:26 -08005420 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Martijn Coenen6aac9792017-06-07 09:29:14 -07005421 node->sched_policy, node->min_priority,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005422 node->has_strong_ref, node->has_weak_ref,
5423 node->local_strong_refs, node->local_weak_refs,
Todd Kjosf22abc72017-05-09 11:08:05 -07005424 node->internal_strong_refs, count, node->tmp_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005425 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005426 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08005427 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005428 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005429 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005430 seq_puts(m, "\n");
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005431 if (node->proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005432 list_for_each_entry(w, &node->async_todo, entry)
Todd Kjos6d241a42017-04-21 14:32:11 -07005433 print_binder_work_ilocked(m, node->proc, " ",
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005434 " pending async transaction", w);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005435 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005436}
5437
Todd Kjos5346bf32016-10-20 16:43:34 -07005438static void print_binder_ref_olocked(struct seq_file *m,
5439 struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005440{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005441 binder_node_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07005442 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5443 ref->data.debug_id, ref->data.desc,
5444 ref->node->proc ? "" : "dead ",
5445 ref->node->debug_id, ref->data.strong,
5446 ref->data.weak, ref->death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005447 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005448}
5449
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005450static void print_binder_proc(struct seq_file *m,
5451 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005452{
5453 struct binder_work *w;
5454 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005455 size_t start_pos = m->count;
5456 size_t header_pos;
Todd Kjos425d23f2017-06-12 12:07:26 -07005457 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005458
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005459 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005460 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005461 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005462
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005463 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005464 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005465 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005466 rb_node), print_all);
Todd Kjos425d23f2017-06-12 12:07:26 -07005467
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005468 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005469 struct binder_node *node = rb_entry(n, struct binder_node,
5470 rb_node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005471 /*
5472 * take a temporary reference on the node so it
5473 * survives and isn't removed from the tree
5474 * while we print it.
5475 */
5476 binder_inc_node_tmpref_ilocked(node);
5477 /* Need to drop inner lock to take node lock */
5478 binder_inner_proc_unlock(proc);
5479 if (last_node)
5480 binder_put_node(last_node);
5481 binder_node_inner_lock(node);
5482 print_binder_node_nilocked(m, node);
5483 binder_node_inner_unlock(node);
5484 last_node = node;
5485 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005486 }
Todd Kjos425d23f2017-06-12 12:07:26 -07005487 binder_inner_proc_unlock(proc);
5488 if (last_node)
5489 binder_put_node(last_node);
5490
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005491 if (print_all) {
Todd Kjos5346bf32016-10-20 16:43:34 -07005492 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005493 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005494 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005495 n = rb_next(n))
Todd Kjos5346bf32016-10-20 16:43:34 -07005496 print_binder_ref_olocked(m, rb_entry(n,
5497 struct binder_ref,
5498 rb_node_desc));
5499 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005500 }
Todd Kjosd325d372016-10-10 10:40:53 -07005501 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005502 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005503 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos6d241a42017-04-21 14:32:11 -07005504 print_binder_work_ilocked(m, proc, " ",
5505 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005506 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005507 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005508 break;
5509 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005510 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005511 if (!print_all && m->count == header_pos)
5512 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005513}
5514
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005515static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005516 "BR_ERROR",
5517 "BR_OK",
5518 "BR_TRANSACTION",
5519 "BR_REPLY",
5520 "BR_ACQUIRE_RESULT",
5521 "BR_DEAD_REPLY",
5522 "BR_TRANSACTION_COMPLETE",
5523 "BR_INCREFS",
5524 "BR_ACQUIRE",
5525 "BR_RELEASE",
5526 "BR_DECREFS",
5527 "BR_ATTEMPT_ACQUIRE",
5528 "BR_NOOP",
5529 "BR_SPAWN_LOOPER",
5530 "BR_FINISHED",
5531 "BR_DEAD_BINDER",
5532 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5533 "BR_FAILED_REPLY"
5534};
5535
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005536static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005537 "BC_TRANSACTION",
5538 "BC_REPLY",
5539 "BC_ACQUIRE_RESULT",
5540 "BC_FREE_BUFFER",
5541 "BC_INCREFS",
5542 "BC_ACQUIRE",
5543 "BC_RELEASE",
5544 "BC_DECREFS",
5545 "BC_INCREFS_DONE",
5546 "BC_ACQUIRE_DONE",
5547 "BC_ATTEMPT_ACQUIRE",
5548 "BC_REGISTER_LOOPER",
5549 "BC_ENTER_LOOPER",
5550 "BC_EXIT_LOOPER",
5551 "BC_REQUEST_DEATH_NOTIFICATION",
5552 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02005553 "BC_DEAD_BINDER_DONE",
5554 "BC_TRANSACTION_SG",
5555 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005556};
5557
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005558static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005559 "proc",
5560 "thread",
5561 "node",
5562 "ref",
5563 "death",
5564 "transaction",
5565 "transaction_complete"
5566};
5567
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005568static void print_binder_stats(struct seq_file *m, const char *prefix,
5569 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005570{
5571 int i;
5572
5573 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005574 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005575 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005576 int temp = atomic_read(&stats->bc[i]);
5577
5578 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005579 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005580 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005581 }
5582
5583 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005584 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005585 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005586 int temp = atomic_read(&stats->br[i]);
5587
5588 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005589 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005590 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005591 }
5592
5593 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005594 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005595 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005596 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005597 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005598 int created = atomic_read(&stats->obj_created[i]);
5599 int deleted = atomic_read(&stats->obj_deleted[i]);
5600
5601 if (created || deleted)
5602 seq_printf(m, "%s%s: active %d total %d\n",
5603 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005604 binder_objstat_strings[i],
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005605 created - deleted,
5606 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005607 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005608}
5609
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005610static void print_binder_proc_stats(struct seq_file *m,
5611 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005612{
5613 struct binder_work *w;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005614 struct binder_thread *thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005615 struct rb_node *n;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005616 int count, strong, weak, ready_threads;
Todd Kjosb4827902017-05-25 15:52:17 -07005617 size_t free_async_space =
5618 binder_alloc_get_free_async_space(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005619
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005620 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005621 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005622 count = 0;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005623 ready_threads = 0;
Todd Kjosb4827902017-05-25 15:52:17 -07005624 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005625 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5626 count++;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005627
5628 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5629 ready_threads++;
5630
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005631 seq_printf(m, " threads: %d\n", count);
5632 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005633 " ready threads %d\n"
5634 " free async space %zd\n", proc->requested_threads,
5635 proc->requested_threads_started, proc->max_threads,
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005636 ready_threads,
Todd Kjosb4827902017-05-25 15:52:17 -07005637 free_async_space);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005638 count = 0;
5639 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5640 count++;
Todd Kjos425d23f2017-06-12 12:07:26 -07005641 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005642 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005643 count = 0;
5644 strong = 0;
5645 weak = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07005646 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005647 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5648 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5649 rb_node_desc);
5650 count++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07005651 strong += ref->data.strong;
5652 weak += ref->data.weak;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005653 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005654 binder_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005655 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005656
Todd Kjosd325d372016-10-10 10:40:53 -07005657 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005658 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005659
Sherry Yang91004422017-08-22 17:26:57 -07005660 binder_alloc_print_pages(m, &proc->alloc);
5661
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005662 count = 0;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005663 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005664 list_for_each_entry(w, &proc->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005665 if (w->type == BINDER_WORK_TRANSACTION)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005666 count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005667 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005668 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005669 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005670
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005671 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005672}
5673
5674
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005675static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005676{
5677 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005678 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005679 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005680
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005681 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005682
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005683 spin_lock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005684 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005685 seq_puts(m, "dead nodes:\n");
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005686 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5687 /*
5688 * take a temporary reference on the node so it
5689 * survives and isn't removed from the list
5690 * while we print it.
5691 */
5692 node->tmp_refs++;
5693 spin_unlock(&binder_dead_nodes_lock);
5694 if (last_node)
5695 binder_put_node(last_node);
5696 binder_node_lock(node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005697 print_binder_node_nilocked(m, node);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005698 binder_node_unlock(node);
5699 last_node = node;
5700 spin_lock(&binder_dead_nodes_lock);
5701 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005702 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005703 if (last_node)
5704 binder_put_node(last_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005705
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005706 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005707 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005708 print_binder_proc(m, proc, 1);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005709 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005710
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005711 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005712}
5713
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005714static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005715{
5716 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005717
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005718 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005719
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005720 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005721
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005722 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005723 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005724 print_binder_proc_stats(m, proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005725 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005726
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005727 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005728}
5729
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005730static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005731{
5732 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005733
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005734 seq_puts(m, "binder transactions:\n");
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005735 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005736 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005737 print_binder_proc(m, proc, 0);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005738 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005739
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005740 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005741}
5742
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005743static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005744{
Riley Andrews83050a42016-02-09 21:05:33 -08005745 struct binder_proc *itr;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005746 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005747
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005748 mutex_lock(&binder_procs_lock);
Riley Andrews83050a42016-02-09 21:05:33 -08005749 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005750 if (itr->pid == pid) {
5751 seq_puts(m, "binder proc state:\n");
5752 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08005753 }
5754 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005755 mutex_unlock(&binder_procs_lock);
5756
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005757 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005758}
5759
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005760static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005761 struct binder_transaction_log_entry *e)
5762{
Todd Kjos1cfe6272017-05-24 13:33:28 -07005763 int debug_id = READ_ONCE(e->debug_id_done);
5764 /*
5765 * read barrier to guarantee debug_id_done read before
5766 * we print the log values
5767 */
5768 smp_rmb();
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005769 seq_printf(m,
Todd Kjos1cfe6272017-05-24 13:33:28 -07005770 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005771 e->debug_id, (e->call_type == 2) ? "reply" :
5772 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005773 e->from_thread, e->to_proc, e->to_thread, e->context_name,
Todd Kjose598d172017-03-22 17:19:52 -07005774 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5775 e->return_error, e->return_error_param,
5776 e->return_error_line);
Todd Kjos1cfe6272017-05-24 13:33:28 -07005777 /*
5778	 * read barrier to guarantee debug_id_done is read only after
5779	 * we are done printing the fields of the entry
5780 */
5781 smp_rmb();
5782 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5783 "\n" : " (incomplete)\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005784}
5785
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005786static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005787{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005788 struct binder_transaction_log *log = m->private;
Todd Kjos1cfe6272017-05-24 13:33:28 -07005789 unsigned int log_cur = atomic_read(&log->cur);
5790 unsigned int count;
5791 unsigned int cur;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005792 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005793
Todd Kjos1cfe6272017-05-24 13:33:28 -07005794 count = log_cur + 1;
5795 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5796 0 : count % ARRAY_SIZE(log->entry);
5797 if (count > ARRAY_SIZE(log->entry) || log->full)
5798 count = ARRAY_SIZE(log->entry);
5799 for (i = 0; i < count; i++) {
5800 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5801
5802 print_binder_transaction_log_entry(m, &log->entry[index]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005803 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005804 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005805}
5806
5807static const struct file_operations binder_fops = {
5808 .owner = THIS_MODULE,
5809 .poll = binder_poll,
5810 .unlocked_ioctl = binder_ioctl,
Arve Hjønnevågda498892014-02-21 14:40:26 -08005811 .compat_ioctl = binder_ioctl,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005812 .mmap = binder_mmap,
5813 .open = binder_open,
5814 .flush = binder_flush,
5815 .release = binder_release,
5816};
5817
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005818BINDER_DEBUG_ENTRY(state);
5819BINDER_DEBUG_ENTRY(stats);
5820BINDER_DEBUG_ENTRY(transactions);
5821BINDER_DEBUG_ENTRY(transaction_log);
5822
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005823static int __init init_binder_device(const char *name)
5824{
5825 int ret;
5826 struct binder_device *binder_device;
5827
5828 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5829 if (!binder_device)
5830 return -ENOMEM;
5831
5832 binder_device->miscdev.fops = &binder_fops;
5833 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5834 binder_device->miscdev.name = name;
5835
5836 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5837 binder_device->context.name = name;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005838 mutex_init(&binder_device->context.context_mgr_node_lock);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005839
5840 ret = misc_register(&binder_device->miscdev);
5841 if (ret < 0) {
5842 kfree(binder_device);
5843 return ret;
5844 }
5845
5846 hlist_add_head(&binder_device->hlist, &binder_devices);
5847
5848 return ret;
5849}
5850
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005851static int __init binder_init(void)
5852{
5853 int ret;
Christian Brauner558ee932017-08-21 16:13:28 +02005854 char *device_name, *device_names, *device_tmp;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005855 struct binder_device *device;
5856 struct hlist_node *tmp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005857
Tetsuo Handaf8cb8222017-11-29 22:29:47 +09005858 ret = binder_alloc_shrinker_init();
5859 if (ret)
5860 return ret;
Sherry Yang5828d702017-07-29 13:24:11 -07005861
Todd Kjos1cfe6272017-05-24 13:33:28 -07005862 atomic_set(&binder_transaction_log.cur, ~0U);
5863 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5864
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005865 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5866 if (binder_debugfs_dir_entry_root)
5867 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5868 binder_debugfs_dir_entry_root);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005869
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005870 if (binder_debugfs_dir_entry_root) {
5871 debugfs_create_file("state",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305872 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005873 binder_debugfs_dir_entry_root,
5874 NULL,
5875 &binder_state_fops);
5876 debugfs_create_file("stats",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305877 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005878 binder_debugfs_dir_entry_root,
5879 NULL,
5880 &binder_stats_fops);
5881 debugfs_create_file("transactions",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305882 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005883 binder_debugfs_dir_entry_root,
5884 NULL,
5885 &binder_transactions_fops);
5886 debugfs_create_file("transaction_log",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305887 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005888 binder_debugfs_dir_entry_root,
5889 &binder_transaction_log,
5890 &binder_transaction_log_fops);
5891 debugfs_create_file("failed_transaction_log",
Harsh Shandilya174562a2017-12-22 19:37:02 +05305892 0444,
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005893 binder_debugfs_dir_entry_root,
5894 &binder_transaction_log_failed,
5895 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005896 }
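
	/*
	 * With debugfs mounted in its usual location, the entries above appear
	 * as /sys/kernel/debug/binder/{state,stats,transactions,
	 * transaction_log,failed_transaction_log}, plus one file per PID under
	 * /sys/kernel/debug/binder/proc/ created in binder_open().
	 */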
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005897
5898 /*
5899 * Copy the module_parameter string, because we don't want to
5900 * tokenize it in-place.
5901 */
5902 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5903 if (!device_names) {
5904 ret = -ENOMEM;
5905 goto err_alloc_device_names_failed;
5906 }
5907 strcpy(device_names, binder_devices_param);
5908
Christian Brauner558ee932017-08-21 16:13:28 +02005909 device_tmp = device_names;
5910 while ((device_name = strsep(&device_tmp, ","))) {
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005911 ret = init_binder_device(device_name);
5912 if (ret)
5913 goto err_init_binder_device_failed;
5914 }
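
	/*
	 * binder_devices_param normally carries a comma-separated list taken
	 * from CONFIG_ANDROID_BINDER_DEVICES or the module parameter; a common
	 * Android-style value such as "binder,hwbinder,vndbinder" (an example,
	 * not something set in this excerpt) would make the loop above register
	 * three misc devices.
	 */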
5915
5916 return ret;
5917
5918err_init_binder_device_failed:
5919 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5920 misc_deregister(&device->miscdev);
5921 hlist_del(&device->hlist);
5922 kfree(device);
5923 }
Christian Brauner558ee932017-08-21 16:13:28 +02005924
5925 kfree(device_names);
5926
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005927err_alloc_device_names_failed:
5928 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5929
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005930 return ret;
5931}
5932
5933device_initcall(binder_init);
5934
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005935#define CREATE_TRACE_POINTS
5936#include "binder_trace.h"
5937
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005938MODULE_LICENSE("GPL v2");