/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is held in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
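
/*
 * Illustrative sketch (not part of the driver, compiled out): taking
 * all three locks in the documented order. The function name is a
 * hypothetical example; the lock helpers themselves are defined later
 * in this file.
 */
#if 0
static void example_lock_order(struct binder_proc *proc,
                               struct binder_node *node)
{
        binder_proc_lock(proc);         /* 1) proc->outer_lock */
        binder_node_lock(node);         /* 2) node->lock */
        binder_inner_proc_lock(proc);   /* 3) proc->inner_lock */
        /* ... access refs, node fields, and todo lists here ... */
        binder_inner_proc_unlock(proc);
        binder_node_unlock(node);
        binder_proc_unlock(proc);
}
#endif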

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
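
/*
 * Example (illustrative only): a diagnostic gated on one of the debug
 * classes above. It is printed only when the corresponding bit is set
 * in binder_debug_mask, e.g. via the debug_mask module parameter:
 *
 *   binder_debug(BINDER_DEBUG_THREADS,
 *                "%d:%d exit\n", proc->pid, thread->pid);
 */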

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}
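
/*
 * Sketch (illustrative only): the br[]/bc[] arrays above are indexed
 * by ioctl command number, so counting a received BC_TRANSACTION would
 * look like:
 *
 *   atomic_inc(&binder_stats.bc[_IOC_NR(BC_TRANSACTION)]);
 */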

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by the memset() below.
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
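
/*
 * Reader-side sketch (illustrative only, compiled out): the smp_wmb()
 * above pairs with smp_rmb() on the consumer side. A reader can tell
 * whether an entry was fully written by sampling debug_id_done before
 * and after reading the payload fields; the helper name below is
 * hypothetical.
 */
#if 0
static bool example_entry_complete(struct binder_transaction_log_entry *e)
{
        int done = READ_ONCE(e->debug_id_done);

        smp_rmb();      /* read debug_id_done before the payload fields */
        /* ... read the other fields of *e here ... */
        smp_rmb();      /* read the payload before re-checking the marker */
        return done && done == READ_ONCE(e->debug_id_done);
}
#endif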

struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 sched_policy:2;
                u8 inherit_rt:1;
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};
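
/*
 * Lookup sketch (illustrative only, compiled out): refs are indexed in
 * two rbtrees. Walking refs_by_desc by userspace handle looks like
 * this; the function name is hypothetical, and the driver's real
 * lookup helpers (not shown in this excerpt) follow the same pattern.
 */
#if 0
static struct binder_ref *example_get_ref_olocked(struct binder_proc *proc,
                                                  u32 desc)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (desc < ref->data.desc)
                        n = n->rb_left;
                else if (desc > ref->data.desc)
                        n = n->rb_right;
                else
                        return ref;
        }
        return NULL;
}
#endif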

enum binder_deferred_state {
        BINDER_DEFERRED_FLUSH        = 0x01,
        BINDER_DEFERRED_RELEASE      = 0x02,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:  scheduler policy
 * @prio:          [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
        unsigned int sched_policy;
        int prio;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        struct binder_priority default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
        struct task_struct *task;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */       /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        struct binder_priority priority;
        struct binder_priority saved_priority;
        bool set_priority_called;
        kuid_t sender_euid;
        /**
         * @lock: protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
}
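
/*
 * Usage note (based on how this is used elsewhere in the driver): the
 * deferred variant queues work without setting process_todo, e.g. so
 * that a synchronous sender's BINDER_WORK_TRANSACTION_COMPLETE is not
 * handled until the reply arrives, while the non-deferred variant
 * below also marks the thread as having work to process immediately.
 */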

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

struct files_struct *binder_get_files_struct(struct binder_proc *proc)
{
        return get_files_struct(proc->tsk);
}

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        struct files_struct *files;
        unsigned long rlim_cur;
        unsigned long irqs;
        int ret;

        files = binder_get_files_struct(proc);
        if (files == NULL)
                return -ESRCH;

        if (!lock_task_sighand(proc->tsk, &irqs)) {
                ret = -EMFILE;
                goto err;
        }

        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        ret = __alloc_fd(files, 0, rlim_cur, flags);
err:
        put_files_struct(files);
        return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        struct files_struct *files = binder_get_files_struct(proc);

        if (files) {
                __fd_install(files, fd, file);
                put_files_struct(files);
        }
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        struct files_struct *files = binder_get_files_struct(proc);
        int retval;

        if (files == NULL)
                return -ESRCH;

        retval = __close_fd(files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;
        put_files_struct(files);

        return retval;
}
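
/*
 * Pairing sketch (illustrative only): when a BINDER_TYPE_FD object is
 * translated into a target process, the three helpers above combine
 * roughly as follows:
 *
 *   target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *   if (target_fd >= 0)
 *           task_fd_install(target_proc, target_fd, file);
 *   // and on a later failure/cleanup path:
 *   task_close_fd(target_proc, target_fd);
 */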

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake-up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
        return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
        return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
        return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
        if (is_fair_policy(policy))
                return PRIO_TO_NICE(kernel_priority);
        else
                return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
        if (is_fair_policy(policy))
                return NICE_TO_PRIO(user_priority);
        else
                return MAX_USER_RT_PRIO - 1 - user_priority;
}
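
/*
 * Worked examples (illustrative, using the kernel's standard macros):
 *   SCHED_NORMAL: to_userspace_prio(SCHED_NORMAL, 120) == 0    (nice 0)
 *                 to_kernel_prio(SCHED_NORMAL, -20)    == 100
 *   SCHED_FIFO:   to_userspace_prio(SCHED_FIFO, 0)     == 99   (highest RT)
 *                 to_kernel_prio(SCHED_FIFO, 99)       == 0
 * Lower kernel prio values mean higher priority: [0..99] for RT,
 * [100..139] for SCHED_NORMAL/SCHED_BATCH.
 */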

static void binder_do_set_priority(struct task_struct *task,
                                   struct binder_priority desired,
                                   bool verify)
{
        int priority; /* user-space prio value */
        bool has_cap_nice;
        unsigned int policy = desired.sched_policy;

        if (task->policy == policy && task->normal_prio == desired.prio)
                return;

        has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

        priority = to_userspace_prio(policy, desired.prio);

        if (verify && is_rt_policy(policy) && !has_cap_nice) {
                long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

                if (max_rtprio == 0) {
                        policy = SCHED_NORMAL;
                        priority = MIN_NICE;
                } else if (priority > max_rtprio) {
                        priority = max_rtprio;
                }
        }

        if (verify && is_fair_policy(policy) && !has_cap_nice) {
                long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

                if (min_nice > MAX_NICE) {
                        binder_user_error("%d RLIMIT_NICE not set\n",
                                          task->pid);
                        return;
                } else if (priority < min_nice) {
                        priority = min_nice;
                }
        }

        if (policy != desired.sched_policy ||
            to_kernel_prio(policy, priority) != desired.prio)
                binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                             "%d: priority %d not allowed, using %d instead\n",
                             task->pid, desired.prio,
                             to_kernel_prio(policy, priority));

        trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
                                  to_kernel_prio(policy, priority),
                                  desired.prio);

        /* Set the actual priority */
        if (task->policy != policy || is_rt_policy(policy)) {
                struct sched_param params;

                params.sched_priority = is_rt_policy(policy) ? priority : 0;

                sched_setscheduler_nocheck(task,
                                           policy | SCHED_RESET_ON_FORK,
                                           &params);
        }
        if (is_fair_policy(policy))
                set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
                                struct binder_priority desired)
{
        binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
                                    struct binder_priority desired)
{
        binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
                                        struct binder_transaction *t,
                                        struct binder_priority node_prio,
                                        bool inherit_rt)
{
        struct binder_priority desired_prio = t->priority;

        if (t->set_priority_called)
                return;

        t->set_priority_called = true;
        t->saved_priority.sched_policy = task->policy;
        t->saved_priority.prio = task->normal_prio;

        if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
                desired_prio.prio = NICE_TO_PRIO(0);
                desired_prio.sched_policy = SCHED_NORMAL;
        }

        if (node_prio.prio < t->priority.prio ||
            (node_prio.prio == t->priority.prio &&
             node_prio.sched_policy == SCHED_FIFO)) {
                /*
                 * In case the minimum priority on the node is
                 * higher (lower value), use that priority. If
                 * the priority is the same, but the node uses
                 * SCHED_FIFO, prefer SCHED_FIFO, since it can
                 * run unbounded, unlike SCHED_RR.
                 */
                desired_prio = node_prio;
        }

        binder_set_priority(task, desired_prio);
}
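
/*
 * Worked example (illustrative): a SCHED_NORMAL caller at prio 120
 * (nice 0) sends to a node whose minimum is SCHED_NORMAL prio 110.
 * Since 110 < 120 (lower value = higher priority), the target runs the
 * transaction at prio 110 and is later restored to the saved priority
 * recorded above. If the caller is RT but the node has inherit_rt
 * clear, the desired priority is first clamped to SCHED_NORMAL nice 0.
 */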

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}
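
/*
 * Pairing sketch (illustrative): every successful lookup takes a
 * temporary reference, so callers are expected to follow the pattern
 *
 *   node = binder_get_node(proc, ptr);
 *   if (node) {
 *           ... use node ...
 *           binder_put_node(node);   // drops the tmp ref
 *   }
 *
 * binder_put_node() is defined later in this file.
 */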
1313
1314static struct binder_node *binder_init_node_ilocked(
1315 struct binder_proc *proc,
1316 struct binder_node *new_node,
1317 struct flat_binder_object *fp)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001318{
1319 struct rb_node **p = &proc->nodes.rb_node;
1320 struct rb_node *parent = NULL;
1321 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001322 binder_uintptr_t ptr = fp ? fp->binder : 0;
1323 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1324 __u32 flags = fp ? fp->flags : 0;
Martijn Coenen6aac9792017-06-07 09:29:14 -07001325 s8 priority;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001326
Martijn Coenened323352017-07-27 23:52:24 +02001327 assert_spin_locked(&proc->inner_lock);
1328
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001329 while (*p) {
Todd Kjos425d23f2017-06-12 12:07:26 -07001330
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001331 parent = *p;
1332 node = rb_entry(parent, struct binder_node, rb_node);
1333
1334 if (ptr < node->ptr)
1335 p = &(*p)->rb_left;
1336 else if (ptr > node->ptr)
1337 p = &(*p)->rb_right;
Todd Kjos425d23f2017-06-12 12:07:26 -07001338 else {
1339 /*
1340 * A matching node is already in
1341 * the rb tree. Abandon the init
1342 * and return it.
1343 */
1344 binder_inc_node_tmpref_ilocked(node);
1345 return node;
1346 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001347 }
Todd Kjos425d23f2017-06-12 12:07:26 -07001348 node = new_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001349 binder_stats_created(BINDER_STAT_NODE);
Todd Kjosf22abc72017-05-09 11:08:05 -07001350 node->tmp_refs++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001351 rb_link_node(&node->rb_node, parent, p);
1352 rb_insert_color(&node->rb_node, &proc->nodes);
Todd Kjosc4bd08b2017-05-25 10:56:00 -07001353 node->debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001354 node->proc = proc;
1355 node->ptr = ptr;
1356 node->cookie = cookie;
1357 node->work.type = BINDER_WORK_NODE;
Martijn Coenen6aac9792017-06-07 09:29:14 -07001358 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
Ganesh Mahendran6cd26312017-09-26 17:56:25 +08001359 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
Martijn Coenen6aac9792017-06-07 09:29:14 -07001360 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1361 node->min_priority = to_kernel_prio(node->sched_policy, priority);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001362 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
Martijn Coenenc46810c2017-06-23 10:13:43 -07001363 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
Todd Kjosfc7a7e22017-05-29 16:44:24 -07001364 spin_lock_init(&node->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001365 INIT_LIST_HEAD(&node->work.entry);
1366 INIT_LIST_HEAD(&node->async_todo);
1367 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001368 "%d:%d node %d u%016llx c%016llx created\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001369 proc->pid, current->pid, node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001370 (u64)node->ptr, (u64)node->cookie);
Todd Kjos425d23f2017-06-12 12:07:26 -07001371
1372 return node;
1373}
1374
1375static struct binder_node *binder_new_node(struct binder_proc *proc,
1376 struct flat_binder_object *fp)
1377{
1378 struct binder_node *node;
1379 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1380
1381 if (!new_node)
1382 return NULL;
1383 binder_inner_proc_lock(proc);
1384 node = binder_init_node_ilocked(proc, new_node, fp);
1385 binder_inner_proc_unlock(proc);
1386 if (node != new_node)
1387 /*
1388 * The node was already added by another thread
1389 */
1390 kfree(new_node);
1391
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001392 return node;
1393}
1394
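/*
 * Illustration only: binder_new_node() always returns with a tmp_ref
 * held on the node (binder_init_node_ilocked() above takes one on both
 * the newly-inserted and the already-existing paths), so callers are
 * expected to drop it with binder_put_node() when done; see
 * binder_translate_binder() later in this file for such a caller.
 */
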
Todd Kjose7f23ed2017-03-21 13:06:01 -07001395static void binder_free_node(struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001396{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001397 kfree(node);
1398 binder_stats_deleted(BINDER_STAT_NODE);
1399}
1400
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001401static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1402 int internal,
1403 struct list_head *target_list)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001404{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001405 struct binder_proc *proc = node->proc;
1406
Martijn Coenened323352017-07-27 23:52:24 +02001407 assert_spin_locked(&node->lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001408 if (proc)
Martijn Coenened323352017-07-27 23:52:24 +02001409 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001410 if (strong) {
1411 if (internal) {
1412 if (target_list == NULL &&
1413 node->internal_strong_refs == 0 &&
Martijn Coenen0b3311e2016-09-30 15:51:48 +02001414 !(node->proc &&
1415 node == node->proc->context->
1416 binder_context_mgr_node &&
1417 node->has_strong_ref)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301418 pr_err("invalid inc strong node for %d\n",
1419 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001420 return -EINVAL;
1421 }
1422 node->internal_strong_refs++;
1423 } else
1424 node->local_strong_refs++;
1425 if (!node->has_strong_ref && target_list) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001426 binder_dequeue_work_ilocked(&node->work);
Martijn Coenen1af61802017-10-19 15:04:46 +02001427 /*
1428 * Note: this function is the only place where we queue
1429 * directly to a thread->todo without using the
1430 * corresponding binder_enqueue_thread_work() helper
1431 * functions; in this case it's ok to not set the
1432 * process_todo flag, since we know this node work will
1433 * always be followed by other work that starts queue
1434 * processing: in case of synchronous transactions, a
1435 * BR_REPLY or BR_ERROR; in case of oneway
1436 * transactions, a BR_TRANSACTION_COMPLETE.
1437 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001438 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001439 }
1440 } else {
1441 if (!internal)
1442 node->local_weak_refs++;
1443 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1444 if (target_list == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301445 pr_err("invalid inc weak node for %d\n",
1446 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001447 return -EINVAL;
1448 }
Martijn Coenen1af61802017-10-19 15:04:46 +02001449 /*
1450 * See comment above
1451 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001452 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001453 }
1454 }
1455 return 0;
1456}
1457
Todd Kjose7f23ed2017-03-21 13:06:01 -07001458static int binder_inc_node(struct binder_node *node, int strong, int internal,
1459 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001460{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001461 int ret;
1462
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001463 binder_node_inner_lock(node);
1464 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1465 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001466
1467 return ret;
1468}
1469
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001470static bool binder_dec_node_nilocked(struct binder_node *node,
1471 int strong, int internal)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001472{
1473 struct binder_proc *proc = node->proc;
1474
Martijn Coenened323352017-07-27 23:52:24 +02001475 assert_spin_locked(&node->lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001476 if (proc)
Martijn Coenened323352017-07-27 23:52:24 +02001477 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001478 if (strong) {
1479 if (internal)
1480 node->internal_strong_refs--;
1481 else
1482 node->local_strong_refs--;
1483 if (node->local_strong_refs || node->internal_strong_refs)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001484 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001485 } else {
1486 if (!internal)
1487 node->local_weak_refs--;
Todd Kjosf22abc72017-05-09 11:08:05 -07001488 if (node->local_weak_refs || node->tmp_refs ||
1489 !hlist_empty(&node->refs))
Todd Kjose7f23ed2017-03-21 13:06:01 -07001490 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001491 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001492
1493 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001494 if (list_empty(&node->work.entry)) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001495 binder_enqueue_work_ilocked(&node->work, &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07001496 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001497 }
1498 } else {
1499 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
Todd Kjosf22abc72017-05-09 11:08:05 -07001500 !node->local_weak_refs && !node->tmp_refs) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07001501 if (proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001502 binder_dequeue_work_ilocked(&node->work);
1503 rb_erase(&node->rb_node, &proc->nodes);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001504 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301505 "refless node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001506 node->debug_id);
1507 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001508 BUG_ON(!list_empty(&node->work.entry));
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001509 spin_lock(&binder_dead_nodes_lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001510 /*
1511 * tmp_refs could have changed so
1512 * check it again
1513 */
1514 if (node->tmp_refs) {
1515 spin_unlock(&binder_dead_nodes_lock);
1516 return false;
1517 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001518 hlist_del(&node->dead_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001519 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001520 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301521 "dead node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001522 node->debug_id);
1523 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001524 return true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001525 }
1526 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001527 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001528}
1529
Todd Kjose7f23ed2017-03-21 13:06:01 -07001530static void binder_dec_node(struct binder_node *node, int strong, int internal)
1531{
1532 bool free_node;
1533
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001534 binder_node_inner_lock(node);
1535 free_node = binder_dec_node_nilocked(node, strong, internal);
1536 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001537 if (free_node)
1538 binder_free_node(node);
1539}
1540
1541static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
Todd Kjosf22abc72017-05-09 11:08:05 -07001542{
1543 /*
1544 * No call to binder_inc_node() is needed since we
1545 * don't need to inform userspace of any changes to
1546 * tmp_refs
1547 */
1548 node->tmp_refs++;
1549}
1550
1551/**
Todd Kjose7f23ed2017-03-21 13:06:01 -07001552 * binder_inc_node_tmpref() - take a temporary reference on node
1553 * @node: node to reference
1554 *
1555 * Take reference on node to prevent the node from being freed
1556 * while referenced only by a local variable. The inner lock is
1557 * needed to serialize with the node work on the queue (which
1558 * isn't needed after the node is dead). If the node is dead
1559 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1560 * node->tmp_refs against dead-node-only cases where the node
 1561 * lock cannot be acquired (e.g. traversing the dead node list to
1562 * print nodes)
1563 */
1564static void binder_inc_node_tmpref(struct binder_node *node)
1565{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001566 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001567 if (node->proc)
1568 binder_inner_proc_lock(node->proc);
1569 else
1570 spin_lock(&binder_dead_nodes_lock);
1571 binder_inc_node_tmpref_ilocked(node);
1572 if (node->proc)
1573 binder_inner_proc_unlock(node->proc);
1574 else
1575 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001576 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001577}
1578
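/*
 * Illustration only: which lock guards node->tmp_refs in the function
 * above depends on whether the node is still attached to a process:
 *
 *   node->proc != NULL (live node) -> node->proc->inner_lock
 *   node->proc == NULL (dead node) -> binder_dead_nodes_lock
 */
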
1579/**
Todd Kjosf22abc72017-05-09 11:08:05 -07001580 * binder_dec_node_tmpref() - remove a temporary reference on node
 1581 * @node: node whose temporary reference is to be released
1582 *
1583 * Release temporary reference on node taken via binder_inc_node_tmpref()
1584 */
1585static void binder_dec_node_tmpref(struct binder_node *node)
1586{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001587 bool free_node;
1588
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001589 binder_node_inner_lock(node);
1590 if (!node->proc)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001591 spin_lock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001592 node->tmp_refs--;
1593 BUG_ON(node->tmp_refs < 0);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001594 if (!node->proc)
1595 spin_unlock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001596 /*
1597 * Call binder_dec_node() to check if all refcounts are 0
1598 * and cleanup is needed. Calling with strong=0 and internal=1
1599 * causes no actual reference to be released in binder_dec_node().
1600 * If that changes, a change is needed here too.
1601 */
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001602 free_node = binder_dec_node_nilocked(node, 0, 1);
1603 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001604 if (free_node)
1605 binder_free_node(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07001606}
1607
1608static void binder_put_node(struct binder_node *node)
1609{
1610 binder_dec_node_tmpref(node);
1611}
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001612
Todd Kjos5346bf32016-10-20 16:43:34 -07001613static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1614 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001615{
1616 struct rb_node *n = proc->refs_by_desc.rb_node;
1617 struct binder_ref *ref;
1618
1619 while (n) {
1620 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1621
Todd Kjosb0117bb2017-05-08 09:16:27 -07001622 if (desc < ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001623 n = n->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001624 } else if (desc > ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001625 n = n->rb_right;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001626 } else if (need_strong_ref && !ref->data.strong) {
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001627 binder_user_error("tried to use weak ref as strong ref\n");
1628 return NULL;
1629 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001630 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001631 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001632 }
1633 return NULL;
1634}
1635
Todd Kjosb0117bb2017-05-08 09:16:27 -07001636/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001637 * binder_get_ref_for_node_olocked() - get the ref associated with given node
Todd Kjosb0117bb2017-05-08 09:16:27 -07001638 * @proc: binder_proc that owns the ref
1639 * @node: binder_node of target
1640 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1641 *
1642 * Look up the ref for the given node and return it if it exists
1643 *
1644 * If it doesn't exist and the caller provides a newly allocated
1645 * ref, initialize the fields of the newly allocated ref and insert
1646 * into the given proc rb_trees and node refs list.
1647 *
1648 * Return: the ref for node. It is possible that another thread
 1649 * allocated/initialized the ref first, in which case the
 1650 * returned ref would be different from the passed-in
1651 * new_ref. new_ref must be kfree'd by the caller in
1652 * this case.
1653 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001654static struct binder_ref *binder_get_ref_for_node_olocked(
1655 struct binder_proc *proc,
1656 struct binder_node *node,
1657 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001658{
Todd Kjosb0117bb2017-05-08 09:16:27 -07001659 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001660 struct rb_node **p = &proc->refs_by_node.rb_node;
1661 struct rb_node *parent = NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001662 struct binder_ref *ref;
1663 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001664
1665 while (*p) {
1666 parent = *p;
1667 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1668
1669 if (node < ref->node)
1670 p = &(*p)->rb_left;
1671 else if (node > ref->node)
1672 p = &(*p)->rb_right;
1673 else
1674 return ref;
1675 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001676 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001677 return NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001678
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001679 binder_stats_created(BINDER_STAT_REF);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001680 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001681 new_ref->proc = proc;
1682 new_ref->node = node;
1683 rb_link_node(&new_ref->rb_node_node, parent, p);
1684 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1685
Todd Kjosb0117bb2017-05-08 09:16:27 -07001686 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001687 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1688 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001689 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001690 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001691 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001692 }
1693
1694 p = &proc->refs_by_desc.rb_node;
1695 while (*p) {
1696 parent = *p;
1697 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1698
Todd Kjosb0117bb2017-05-08 09:16:27 -07001699 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001700 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001701 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001702 p = &(*p)->rb_right;
1703 else
1704 BUG();
1705 }
1706 rb_link_node(&new_ref->rb_node_desc, parent, p);
1707 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001708
1709 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001710 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001711
Todd Kjos4cbe5752017-05-01 17:21:51 -07001712 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1713 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001714 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001715 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001716 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001717 return new_ref;
1718}
1719
Todd Kjos5346bf32016-10-20 16:43:34 -07001720static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001721{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001722 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001723
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001724 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301725 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001726 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301727 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001728
1729 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1730 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001731
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001732 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001733 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001734 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001735
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001736 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001737 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1738 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001739 /*
1740 * Clear ref->node unless we want the caller to free the node
1741 */
1742 if (!delete_node) {
1743 /*
1744 * The caller uses ref->node to determine
1745 * whether the node needs to be freed. Clear
1746 * it since the node is still alive.
1747 */
1748 ref->node = NULL;
1749 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001750
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001751 if (ref->death) {
1752 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301753 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001754 ref->proc->pid, ref->data.debug_id,
1755 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001756 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001757 binder_stats_deleted(BINDER_STAT_DEATH);
1758 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001759 binder_stats_deleted(BINDER_STAT_REF);
1760}
1761
Todd Kjosb0117bb2017-05-08 09:16:27 -07001762/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001763 * binder_inc_ref_olocked() - increment the given ref
Todd Kjosb0117bb2017-05-08 09:16:27 -07001764 * @ref: ref to be incremented
1765 * @strong: if true, strong increment, else weak
1766 * @target_list: list to queue node work on
1767 *
Todd Kjos5346bf32016-10-20 16:43:34 -07001768 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjosb0117bb2017-05-08 09:16:27 -07001769 *
1770 * Return: 0, if successful, else errno
1771 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001772static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1773 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001774{
1775 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001776
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001777 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001778 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001779 ret = binder_inc_node(ref->node, 1, 1, target_list);
1780 if (ret)
1781 return ret;
1782 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001783 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001784 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001785 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001786 ret = binder_inc_node(ref->node, 0, 1, target_list);
1787 if (ret)
1788 return ret;
1789 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001790 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001791 }
1792 return 0;
1793}
1794
Todd Kjosb0117bb2017-05-08 09:16:27 -07001795/**
 1796 * binder_dec_ref_olocked() - dec the given ref
 1797 * @ref: ref to be decremented
 1798 * @strong: if true, strong decrement, else weak
 1799 *
 1800 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1801 *
Todd Kjosb0117bb2017-05-08 09:16:27 -07001802 * Return: true if ref is cleaned up and ready to be freed
1803 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001804static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001805{
1806 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001807 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301808 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001809 ref->proc->pid, ref->data.debug_id,
1810 ref->data.desc, ref->data.strong,
1811 ref->data.weak);
1812 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001813 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001814 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001815 if (ref->data.strong == 0)
1816 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001817 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001818 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301819 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001820 ref->proc->pid, ref->data.debug_id,
1821 ref->data.desc, ref->data.strong,
1822 ref->data.weak);
1823 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001824 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001825 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001826 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001827 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001828 binder_cleanup_ref_olocked(ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001829 return true;
1830 }
1831 return false;
1832}
1833
1834/**
1835 * binder_get_node_from_ref() - get the node from the given proc/desc
1836 * @proc: proc containing the ref
1837 * @desc: the handle associated with the ref
1838 * @need_strong_ref: if true, only return node if ref is strong
1839 * @rdata: the id/refcount data for the ref
1840 *
1841 * Given a proc and ref handle, return the associated binder_node
1842 *
 1843 * Return: a binder_node, or NULL if the ref is not found or is only weak when a strong ref is required
1844 */
1845static struct binder_node *binder_get_node_from_ref(
1846 struct binder_proc *proc,
1847 u32 desc, bool need_strong_ref,
1848 struct binder_ref_data *rdata)
1849{
1850 struct binder_node *node;
1851 struct binder_ref *ref;
1852
Todd Kjos5346bf32016-10-20 16:43:34 -07001853 binder_proc_lock(proc);
1854 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001855 if (!ref)
1856 goto err_no_ref;
1857 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001858 /*
1859 * Take an implicit reference on the node to ensure
1860 * it stays alive until the call to binder_put_node()
1861 */
1862 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001863 if (rdata)
1864 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001865 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001866
1867 return node;
1868
1869err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001870 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001871 return NULL;
1872}
1873
1874/**
1875 * binder_free_ref() - free the binder_ref
1876 * @ref: ref to free
1877 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001878 * Free the binder_ref. Free the binder_node indicated by ref->node
1879 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001880 */
1881static void binder_free_ref(struct binder_ref *ref)
1882{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001883 if (ref->node)
1884 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001885 kfree(ref->death);
1886 kfree(ref);
1887}
1888
1889/**
1890 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1891 * @proc: proc containing the ref
1892 * @desc: the handle associated with the ref
1893 * @increment: true=inc reference, false=dec reference
1894 * @strong: true=strong reference, false=weak reference
1895 * @rdata: the id/refcount data for the ref
1896 *
1897 * Given a proc and ref handle, increment or decrement the ref
1898 * according to "increment" arg.
1899 *
1900 * Return: 0 if successful, else errno
1901 */
1902static int binder_update_ref_for_handle(struct binder_proc *proc,
1903 uint32_t desc, bool increment, bool strong,
1904 struct binder_ref_data *rdata)
1905{
1906 int ret = 0;
1907 struct binder_ref *ref;
1908 bool delete_ref = false;
1909
Todd Kjos5346bf32016-10-20 16:43:34 -07001910 binder_proc_lock(proc);
1911 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001912 if (!ref) {
1913 ret = -EINVAL;
1914 goto err_no_ref;
1915 }
1916 if (increment)
Todd Kjos5346bf32016-10-20 16:43:34 -07001917 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001918 else
Todd Kjos5346bf32016-10-20 16:43:34 -07001919 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001920
1921 if (rdata)
1922 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001923 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001924
1925 if (delete_ref)
1926 binder_free_ref(ref);
1927 return ret;
1928
1929err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001930 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001931 return ret;
1932}
1933
1934/**
1935 * binder_dec_ref_for_handle() - dec the ref for given handle
1936 * @proc: proc containing the ref
1937 * @desc: the handle associated with the ref
1938 * @strong: true=strong reference, false=weak reference
1939 * @rdata: the id/refcount data for the ref
1940 *
1941 * Just calls binder_update_ref_for_handle() to decrement the ref.
1942 *
1943 * Return: 0 if successful, else errno
1944 */
1945static int binder_dec_ref_for_handle(struct binder_proc *proc,
1946 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1947{
1948 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1949}
1950
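/*
 * Illustration only (hypothetical caller): taking and then dropping a
 * strong reference on handle 'desc' via the helpers above; on success,
 * *rdata holds a snapshot of the ref's id/refcount data for debug logs.
 */
#if 0	/* usage sketch */
	struct binder_ref_data rdata;
	int ret;

	ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
	if (!ret)
		ret = binder_dec_ref_for_handle(proc, desc, true, &rdata);
#endif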
1951
1952/**
1953 * binder_inc_ref_for_node() - increment the ref for given proc/node
1954 * @proc: proc containing the ref
1955 * @node: target node
1956 * @strong: true=strong reference, false=weak reference
1957 * @target_list: worklist to use if node is incremented
1958 * @rdata: the id/refcount data for the ref
1959 *
1960 * Given a proc and node, increment the ref. Create the ref if it
1961 * doesn't already exist
1962 *
1963 * Return: 0 if successful, else errno
1964 */
1965static int binder_inc_ref_for_node(struct binder_proc *proc,
1966 struct binder_node *node,
1967 bool strong,
1968 struct list_head *target_list,
1969 struct binder_ref_data *rdata)
1970{
1971 struct binder_ref *ref;
1972 struct binder_ref *new_ref = NULL;
1973 int ret = 0;
1974
Todd Kjos5346bf32016-10-20 16:43:34 -07001975 binder_proc_lock(proc);
1976 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001977 if (!ref) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001978 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001979 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1980 if (!new_ref)
1981 return -ENOMEM;
Todd Kjos5346bf32016-10-20 16:43:34 -07001982 binder_proc_lock(proc);
1983 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001984 }
Todd Kjos5346bf32016-10-20 16:43:34 -07001985 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001986 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001987 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001988 if (new_ref && ref != new_ref)
1989 /*
1990 * Another thread created the ref first so
1991 * free the one we allocated
1992 */
1993 kfree(new_ref);
1994 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001995}
1996
Martijn Coenen995a36e2017-06-02 13:36:52 -07001997static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1998 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001999{
Todd Kjos21ef40a2017-03-30 18:02:13 -07002000 BUG_ON(!target_thread);
Martijn Coenened323352017-07-27 23:52:24 +02002001 assert_spin_locked(&target_thread->proc->inner_lock);
Todd Kjos21ef40a2017-03-30 18:02:13 -07002002 BUG_ON(target_thread->transaction_stack != t);
2003 BUG_ON(target_thread->transaction_stack->from != target_thread);
2004 target_thread->transaction_stack =
2005 target_thread->transaction_stack->from_parent;
2006 t->from = NULL;
2007}
2008
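/*
 * Illustration only: the transaction stack is a singly linked list
 * threaded through from_parent, so the pop above simply restores the
 * previous frame (transaction ids below are made up):
 *
 *   before: target_thread->transaction_stack: t3 -> t2 -> t1 -> NULL
 *   after binder_pop_transaction_ilocked(target_thread, t3):
 *           target_thread->transaction_stack: t2 -> t1 -> NULL
 */
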
Todd Kjos2f993e22017-05-12 14:42:55 -07002009/**
2010 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2011 * @thread: thread to decrement
2012 *
2013 * A thread needs to be kept alive while being used to create or
2014 * handle a transaction. binder_get_txn_from() is used to safely
2015 * extract t->from from a binder_transaction and keep the thread
2016 * indicated by t->from from being freed. When done with that
2017 * binder_thread, this function is called to decrement the
2018 * tmp_ref and free if appropriate (thread has been released
 2019 * and no transaction is being processed by the driver)
2020 */
2021static void binder_thread_dec_tmpref(struct binder_thread *thread)
2022{
2023 /*
 2024 * the counter is atomic, so updates without the inner lock
 2025 * are safe while it cannot reach zero or thread->is_dead is false
Todd Kjos2f993e22017-05-12 14:42:55 -07002026 */
Todd Kjosb4827902017-05-25 15:52:17 -07002027 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002028 atomic_dec(&thread->tmp_ref);
2029 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07002030 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002031 binder_free_thread(thread);
2032 return;
2033 }
Todd Kjosb4827902017-05-25 15:52:17 -07002034 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002035}
2036
2037/**
2038 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2039 * @proc: proc to decrement
2040 *
2041 * A binder_proc needs to be kept alive while being used to create or
2042 * handle a transaction. proc->tmp_ref is incremented when
 2043 * creating a new transaction or while the binder_proc is in use
2044 * by threads that are being released. When done with the binder_proc,
2045 * this function is called to decrement the counter and free the
2046 * proc if appropriate (proc has been released, all threads have
 2047 * been released and it is not currently in use to process a transaction).
2048 */
2049static void binder_proc_dec_tmpref(struct binder_proc *proc)
2050{
Todd Kjosb4827902017-05-25 15:52:17 -07002051 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002052 proc->tmp_ref--;
2053 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2054 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07002055 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002056 binder_free_proc(proc);
2057 return;
2058 }
Todd Kjosb4827902017-05-25 15:52:17 -07002059 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002060}
2061
2062/**
2063 * binder_get_txn_from() - safely extract the "from" thread in transaction
2064 * @t: binder transaction for t->from
2065 *
2066 * Atomically return the "from" thread and increment the tmp_ref
2067 * count for the thread to ensure it stays alive until
2068 * binder_thread_dec_tmpref() is called.
2069 *
2070 * Return: the value of t->from
2071 */
2072static struct binder_thread *binder_get_txn_from(
2073 struct binder_transaction *t)
2074{
2075 struct binder_thread *from;
2076
2077 spin_lock(&t->lock);
2078 from = t->from;
2079 if (from)
2080 atomic_inc(&from->tmp_ref);
2081 spin_unlock(&t->lock);
2082 return from;
2083}
2084
Martijn Coenen995a36e2017-06-02 13:36:52 -07002085/**
2086 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2087 * @t: binder transaction for t->from
2088 *
2089 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2090 * to guarantee that the thread cannot be released while operating on it.
2091 * The caller must call binder_inner_proc_unlock() to release the inner lock
 2092 * as well as call binder_thread_dec_tmpref() to release the reference.
2093 *
2094 * Return: the value of t->from
2095 */
2096static struct binder_thread *binder_get_txn_from_and_acq_inner(
2097 struct binder_transaction *t)
2098{
2099 struct binder_thread *from;
2100
2101 from = binder_get_txn_from(t);
2102 if (!from)
2103 return NULL;
2104 binder_inner_proc_lock(from->proc);
2105 if (t->from) {
2106 BUG_ON(from != t->from);
2107 return from;
2108 }
2109 binder_inner_proc_unlock(from->proc);
2110 binder_thread_dec_tmpref(from);
2111 return NULL;
2112}
2113
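/*
 * Illustration only (hypothetical caller): the canonical use of the
 * helper above, as described in its kerneldoc;
 * binder_send_failed_reply() below follows this exact shape.
 */
#if 0	/* usage sketch */
	struct binder_thread *target_thread;

	target_thread = binder_get_txn_from_and_acq_inner(t);
	if (target_thread) {
		/* safe to use target_thread; inner lock is held */
		binder_inner_proc_unlock(target_thread->proc);
		binder_thread_dec_tmpref(target_thread);
	}
#endif
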
Todd Kjos21ef40a2017-03-30 18:02:13 -07002114static void binder_free_transaction(struct binder_transaction *t)
2115{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002116 if (t->buffer)
2117 t->buffer->transaction = NULL;
2118 kfree(t);
2119 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2120}
2121
2122static void binder_send_failed_reply(struct binder_transaction *t,
2123 uint32_t error_code)
2124{
2125 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002126 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09002127
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002128 BUG_ON(t->flags & TF_ONE_WAY);
2129 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002130 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002131 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002132 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2133 "send failed reply for transaction %d to %d:%d\n",
2134 t->debug_id,
2135 target_thread->proc->pid,
2136 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002137
Martijn Coenen995a36e2017-06-02 13:36:52 -07002138 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07002139 if (target_thread->reply_error.cmd == BR_OK) {
2140 target_thread->reply_error.cmd = error_code;
Martijn Coenen1af61802017-10-19 15:04:46 +02002141 binder_enqueue_thread_work_ilocked(
2142 target_thread,
2143 &target_thread->reply_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002144 wake_up_interruptible(&target_thread->wait);
2145 } else {
Todd Kjosd3a2afb2018-02-07 12:38:47 -08002146 /*
2147 * Cannot get here for normal operation, but
2148 * we can if multiple synchronous transactions
2149 * are sent without blocking for responses.
2150 * Just ignore the 2nd error in this case.
2151 */
2152 pr_warn("Unexpected reply error: %u\n",
2153 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002154 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002155 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002156 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07002157 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002158 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002159 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002160 next = t->from_parent;
2161
2162 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2163 "send failed reply for transaction %d, target dead\n",
2164 t->debug_id);
2165
Todd Kjos21ef40a2017-03-30 18:02:13 -07002166 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002167 if (next == NULL) {
2168 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2169 "reply failed, no target thread at root\n");
2170 return;
2171 }
2172 t = next;
2173 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2174 "reply failed, no target thread -- retry %d\n",
2175 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002176 }
2177}
2178
Martijn Coenen00c80372016-07-13 12:06:49 +02002179/**
Martijn Coenen3217ccc2017-08-24 15:23:36 +02002180 * binder_cleanup_transaction() - cleans up undelivered transaction
2181 * @t: transaction that needs to be cleaned up
2182 * @reason: reason the transaction wasn't delivered
2183 * @error_code: error to return to caller (if synchronous call)
2184 */
2185static void binder_cleanup_transaction(struct binder_transaction *t,
2186 const char *reason,
2187 uint32_t error_code)
2188{
2189 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2190 binder_send_failed_reply(t, error_code);
2191 } else {
2192 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2193 "undelivered transaction %d, %s\n",
2194 t->debug_id, reason);
2195 binder_free_transaction(t);
2196 }
2197}
2198
2199/**
Martijn Coenen00c80372016-07-13 12:06:49 +02002200 * binder_validate_object() - checks for a valid metadata object in a buffer.
2201 * @buffer: binder_buffer that we're parsing.
2202 * @offset: offset in the buffer at which to validate an object.
2203 *
2204 * Return: If there's a valid metadata object at @offset in @buffer, the
2205 * size of that object. Otherwise, it returns zero.
2206 */
2207static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2208{
2209 /* Check if we can read a header first */
2210 struct binder_object_header *hdr;
2211 size_t object_size = 0;
2212
Dan Carpentera1996892018-03-29 12:14:40 +03002213 if (buffer->data_size < sizeof(*hdr) ||
2214 offset > buffer->data_size - sizeof(*hdr) ||
Martijn Coenen00c80372016-07-13 12:06:49 +02002215 !IS_ALIGNED(offset, sizeof(u32)))
2216 return 0;
2217
2218 /* Ok, now see if we can read a complete object. */
2219 hdr = (struct binder_object_header *)(buffer->data + offset);
2220 switch (hdr->type) {
2221 case BINDER_TYPE_BINDER:
2222 case BINDER_TYPE_WEAK_BINDER:
2223 case BINDER_TYPE_HANDLE:
2224 case BINDER_TYPE_WEAK_HANDLE:
2225 object_size = sizeof(struct flat_binder_object);
2226 break;
2227 case BINDER_TYPE_FD:
2228 object_size = sizeof(struct binder_fd_object);
2229 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002230 case BINDER_TYPE_PTR:
2231 object_size = sizeof(struct binder_buffer_object);
2232 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002233 case BINDER_TYPE_FDA:
2234 object_size = sizeof(struct binder_fd_array_object);
2235 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02002236 default:
2237 return 0;
2238 }
2239 if (offset <= buffer->data_size - object_size &&
2240 buffer->data_size >= object_size)
2241 return object_size;
2242 else
2243 return 0;
2244}
2245
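/*
 * Illustration only: how a transaction buffer's offsets array is
 * walked with binder_validate_object(). The offsets live right after
 * the data section, aligned to sizeof(void *), and each entry locates
 * one object header; binder_transaction_buffer_release() below
 * iterates this same way.
 */
#if 0	/* traversal sketch */
	binder_size_t *offp, *off_start, *off_end;

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0)
			break;	/* bad object; real callers log and skip */
	}
#endif
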
Martijn Coenen5a6da532016-09-30 14:10:07 +02002246/**
2247 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2248 * @b: binder_buffer containing the object
2249 * @index: index in offset array at which the binder_buffer_object is
2250 * located
2251 * @start: points to the start of the offset array
2252 * @num_valid: the number of valid offsets in the offset array
2253 *
2254 * Return: If @index is within the valid range of the offset array
2255 * described by @start and @num_valid, and if there's a valid
2256 * binder_buffer_object at the offset found in index @index
2257 * of the offset array, that object is returned. Otherwise,
2258 * %NULL is returned.
2259 * Note that the offset found in index @index itself is not
2260 * verified; this function assumes that @num_valid elements
2261 * from @start were previously verified to have valid offsets.
2262 */
2263static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2264 binder_size_t index,
2265 binder_size_t *start,
2266 binder_size_t num_valid)
2267{
2268 struct binder_buffer_object *buffer_obj;
2269 binder_size_t *offp;
2270
2271 if (index >= num_valid)
2272 return NULL;
2273
2274 offp = start + index;
2275 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2276 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2277 return NULL;
2278
2279 return buffer_obj;
2280}
2281
2282/**
2283 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2284 * @b: transaction buffer
 2285 * @objects_start: start of objects buffer
2286 * @buffer: binder_buffer_object in which to fix up
2287 * @offset: start offset in @buffer to fix up
2288 * @last_obj: last binder_buffer_object that we fixed up in
2289 * @last_min_offset: minimum fixup offset in @last_obj
2290 *
2291 * Return: %true if a fixup in buffer @buffer at offset @offset is
2292 * allowed.
2293 *
2294 * For safety reasons, we only allow fixups inside a buffer to happen
2295 * at increasing offsets; additionally, we only allow fixup on the last
2296 * buffer object that was verified, or one of its parents.
2297 *
2298 * Example of what is allowed:
2299 *
2300 * A
2301 * B (parent = A, offset = 0)
2302 * C (parent = A, offset = 16)
2303 * D (parent = C, offset = 0)
2304 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2305 *
2306 * Examples of what is not allowed:
2307 *
2308 * Decreasing offsets within the same parent:
2309 * A
2310 * C (parent = A, offset = 16)
2311 * B (parent = A, offset = 0) // decreasing offset within A
2312 *
2313 * Referring to a parent that wasn't the last object or any of its parents:
2314 * A
2315 * B (parent = A, offset = 0)
2316 * C (parent = A, offset = 0)
2317 * C (parent = A, offset = 16)
2318 * D (parent = B, offset = 0) // B is not A or any of A's parents
2319 */
2320static bool binder_validate_fixup(struct binder_buffer *b,
2321 binder_size_t *objects_start,
2322 struct binder_buffer_object *buffer,
2323 binder_size_t fixup_offset,
2324 struct binder_buffer_object *last_obj,
2325 binder_size_t last_min_offset)
2326{
2327 if (!last_obj) {
 2328 /* No last object: nothing to fix up in */
2329 return false;
2330 }
2331
2332 while (last_obj != buffer) {
2333 /*
2334 * Safe to retrieve the parent of last_obj, since it
2335 * was already previously verified by the driver.
2336 */
2337 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2338 return false;
2339 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2340 last_obj = (struct binder_buffer_object *)
2341 (b->data + *(objects_start + last_obj->parent));
2342 }
2343 return (fixup_offset >= last_min_offset);
2344}
2345
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002346static void binder_transaction_buffer_release(struct binder_proc *proc,
2347 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002348 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002349{
Martijn Coenen5a6da532016-09-30 14:10:07 +02002350 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002351 int debug_id = buffer->debug_id;
2352
2353 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302354 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002355 proc->pid, buffer->debug_id,
2356 buffer->data_size, buffer->offsets_size, failed_at);
2357
2358 if (buffer->target_node)
2359 binder_dec_node(buffer->target_node, 1, 0);
2360
Martijn Coenen5a6da532016-09-30 14:10:07 +02002361 off_start = (binder_size_t *)(buffer->data +
2362 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002363 if (failed_at)
2364 off_end = failed_at;
2365 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02002366 off_end = (void *)off_start + buffer->offsets_size;
2367 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002368 struct binder_object_header *hdr;
2369 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002370
Martijn Coenen00c80372016-07-13 12:06:49 +02002371 if (object_size == 0) {
2372 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002373 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002374 continue;
2375 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002376 hdr = (struct binder_object_header *)(buffer->data + *offp);
2377 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002378 case BINDER_TYPE_BINDER:
2379 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002380 struct flat_binder_object *fp;
2381 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002382
Martijn Coenen00c80372016-07-13 12:06:49 +02002383 fp = to_flat_binder_object(hdr);
2384 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002385 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002386 pr_err("transaction release %d bad node %016llx\n",
2387 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002388 break;
2389 }
2390 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002391 " node %d u%016llx\n",
2392 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002393 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2394 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002395 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002396 } break;
2397 case BINDER_TYPE_HANDLE:
2398 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002399 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002400 struct binder_ref_data rdata;
2401 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002402
Martijn Coenen00c80372016-07-13 12:06:49 +02002403 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002404 ret = binder_dec_ref_for_handle(proc, fp->handle,
2405 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2406
2407 if (ret) {
2408 pr_err("transaction release %d bad handle %d, ret = %d\n",
2409 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002410 break;
2411 }
2412 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002413 " ref %d desc %d\n",
2414 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002415 } break;
2416
Martijn Coenen00c80372016-07-13 12:06:49 +02002417 case BINDER_TYPE_FD: {
2418 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2419
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002420 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002421 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002422 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002423 task_close_fd(proc, fp->fd);
2424 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002425 case BINDER_TYPE_PTR:
2426 /*
2427 * Nothing to do here, this will get cleaned up when the
2428 * transaction buffer gets freed
2429 */
2430 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002431 case BINDER_TYPE_FDA: {
2432 struct binder_fd_array_object *fda;
2433 struct binder_buffer_object *parent;
2434 uintptr_t parent_buffer;
2435 u32 *fd_array;
2436 size_t fd_index;
2437 binder_size_t fd_buf_size;
2438
2439 fda = to_binder_fd_array_object(hdr);
2440 parent = binder_validate_ptr(buffer, fda->parent,
2441 off_start,
2442 offp - off_start);
2443 if (!parent) {
 2444 pr_err("transaction release %d bad parent offset\n",
2445 debug_id);
2446 continue;
2447 }
2448 /*
2449 * Since the parent was already fixed up, convert it
2450 * back to kernel address space to access it
2451 */
2452 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002453 binder_alloc_get_user_buffer_offset(
2454 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002455
2456 fd_buf_size = sizeof(u32) * fda->num_fds;
2457 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2458 pr_err("transaction release %d invalid number of fds (%lld)\n",
2459 debug_id, (u64)fda->num_fds);
2460 continue;
2461 }
2462 if (fd_buf_size > parent->length ||
2463 fda->parent_offset > parent->length - fd_buf_size) {
2464 /* No space for all file descriptors here. */
2465 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2466 debug_id, (u64)fda->num_fds);
2467 continue;
2468 }
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002469 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002470 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2471 task_close_fd(proc, fd_array[fd_index]);
2472 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002473 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002474 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002475 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002476 break;
2477 }
2478 }
2479}
2480
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002481static int binder_translate_binder(struct flat_binder_object *fp,
2482 struct binder_transaction *t,
2483 struct binder_thread *thread)
2484{
2485 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002486 struct binder_proc *proc = thread->proc;
2487 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002488 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002489 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002490
2491 node = binder_get_node(proc, fp->binder);
2492 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002493 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002494 if (!node)
2495 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002496 }
2497 if (fp->cookie != node->cookie) {
2498 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2499 proc->pid, thread->pid, (u64)fp->binder,
2500 node->debug_id, (u64)fp->cookie,
2501 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002502 ret = -EINVAL;
2503 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002504 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002505 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2506 ret = -EPERM;
2507 goto done;
2508 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002509
Todd Kjosb0117bb2017-05-08 09:16:27 -07002510 ret = binder_inc_ref_for_node(target_proc, node,
2511 fp->hdr.type == BINDER_TYPE_BINDER,
2512 &thread->todo, &rdata);
2513 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002514 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002515
2516 if (fp->hdr.type == BINDER_TYPE_BINDER)
2517 fp->hdr.type = BINDER_TYPE_HANDLE;
2518 else
2519 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2520 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002521 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002522 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002523
Todd Kjosb0117bb2017-05-08 09:16:27 -07002524 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002525 binder_debug(BINDER_DEBUG_TRANSACTION,
2526 " node %d u%016llx -> ref %d desc %d\n",
2527 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002528 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002529done:
2530 binder_put_node(node);
2531 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002532}
2533
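/*
 * Illustration only: the effect of the translation above on the object
 * the sender placed in the transaction buffer. The sender's local node
 * (binder/cookie pair) becomes a handle that is only meaningful inside
 * the receiving process (all values below are made up):
 *
 *   as sent:      { .hdr.type = BINDER_TYPE_BINDER,
 *                   .binder = 0xb10b, .cookie = 0xc00c }
 *   as received:  { .hdr.type = BINDER_TYPE_HANDLE,
 *                   .handle = 7, .binder = 0, .cookie = 0 }
 */
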
2534static int binder_translate_handle(struct flat_binder_object *fp,
2535 struct binder_transaction *t,
2536 struct binder_thread *thread)
2537{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002538 struct binder_proc *proc = thread->proc;
2539 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002540 struct binder_node *node;
2541 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002542 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002543
Todd Kjosb0117bb2017-05-08 09:16:27 -07002544 node = binder_get_node_from_ref(proc, fp->handle,
2545 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2546 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002547 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2548 proc->pid, thread->pid, fp->handle);
2549 return -EINVAL;
2550 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002551 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2552 ret = -EPERM;
2553 goto done;
2554 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002555
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002556 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002557 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002558 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2559 fp->hdr.type = BINDER_TYPE_BINDER;
2560 else
2561 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002562 fp->binder = node->ptr;
2563 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002564 if (node->proc)
2565 binder_inner_proc_lock(node->proc);
2566 binder_inc_node_nilocked(node,
2567 fp->hdr.type == BINDER_TYPE_BINDER,
2568 0, NULL);
2569 if (node->proc)
2570 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002571 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002572 binder_debug(BINDER_DEBUG_TRANSACTION,
2573 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002574 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2575 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002576 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002577 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002578 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002579
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002580 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002581 ret = binder_inc_ref_for_node(target_proc, node,
2582 fp->hdr.type == BINDER_TYPE_HANDLE,
2583 NULL, &dest_rdata);
2584 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002585 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002586
2587 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002588 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002589 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002590 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2591 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002592 binder_debug(BINDER_DEBUG_TRANSACTION,
2593 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002594 src_rdata.debug_id, src_rdata.desc,
2595 dest_rdata.debug_id, dest_rdata.desc,
2596 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002597 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002598done:
2599 binder_put_node(node);
2600 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002601}
2602
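/*
 * Translate a sender fd into a newly installed fd in the target
 * process (O_CLOEXEC). Returns the target fd, or a negative errno
 * (-EPERM, -EBADF, -ENOMEM) which the caller treats as a transaction
 * failure.
 */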
2603static int binder_translate_fd(int fd,
2604 struct binder_transaction *t,
2605 struct binder_thread *thread,
2606 struct binder_transaction *in_reply_to)
2607{
2608 struct binder_proc *proc = thread->proc;
2609 struct binder_proc *target_proc = t->to_proc;
2610 int target_fd;
2611 struct file *file;
2612 int ret;
2613 bool target_allows_fd;
2614
2615 if (in_reply_to)
2616 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2617 else
2618 target_allows_fd = t->buffer->target_node->accept_fds;
2619 if (!target_allows_fd) {
2620 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2621 proc->pid, thread->pid,
2622 in_reply_to ? "reply" : "transaction",
2623 fd);
2624 ret = -EPERM;
2625 goto err_fd_not_accepted;
2626 }
2627
2628 file = fget(fd);
2629 if (!file) {
2630 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2631 proc->pid, thread->pid, fd);
2632 ret = -EBADF;
2633 goto err_fget;
2634 }
2635 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2636 if (ret < 0) {
2637 ret = -EPERM;
2638 goto err_security;
2639 }
2640
2641 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2642 if (target_fd < 0) {
2643 ret = -ENOMEM;
2644 goto err_get_unused_fd;
2645 }
2646 task_fd_install(target_proc, target_fd, file);
2647 trace_binder_transaction_fd(t, fd, target_fd);
2648 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2649 fd, target_fd);
2650
2651 return target_fd;
2652
2653err_get_unused_fd:
2654err_security:
2655 fput(file);
2656err_fget:
2657err_fd_not_accepted:
2658 return ret;
2659}
2660
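/*
 * Translate every fd in a BINDER_TYPE_FDA object. The fds sit inside
 * the already-copied parent buffer as an array of fda->num_fds u32
 * values starting at fda->parent_offset, and are rewritten in place:
 *
 *	parent->buffer: [ ... | fd[0] fd[1] ... fd[num_fds-1] | ... ]
 *	                       ^ fda->parent_offset
 *
 * On failure, fds already installed in the target are closed again.
 */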
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002661static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2662 struct binder_buffer_object *parent,
2663 struct binder_transaction *t,
2664 struct binder_thread *thread,
2665 struct binder_transaction *in_reply_to)
2666{
2667 binder_size_t fdi, fd_buf_size, num_installed_fds;
2668 int target_fd;
2669 uintptr_t parent_buffer;
2670 u32 *fd_array;
2671 struct binder_proc *proc = thread->proc;
2672 struct binder_proc *target_proc = t->to_proc;
2673
2674 fd_buf_size = sizeof(u32) * fda->num_fds;
2675 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2676 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2677 proc->pid, thread->pid, (u64)fda->num_fds);
2678 return -EINVAL;
2679 }
2680 if (fd_buf_size > parent->length ||
2681 fda->parent_offset > parent->length - fd_buf_size) {
2682 /* No space for all file descriptors here. */
2683 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2684 proc->pid, thread->pid, (u64)fda->num_fds);
2685 return -EINVAL;
2686 }
2687 /*
2688 * Since the parent was already fixed up, convert it
2689 * back to the kernel address space to access it
2690 */
Todd Kjosd325d372016-10-10 10:40:53 -07002691 parent_buffer = parent->buffer -
2692 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002693 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002694 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2695 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2696 proc->pid, thread->pid);
2697 return -EINVAL;
2698 }
2699 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2700 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2701 in_reply_to);
2702 if (target_fd < 0)
2703 goto err_translate_fd_failed;
2704 fd_array[fdi] = target_fd;
2705 }
2706 return 0;
2707
2708err_translate_fd_failed:
2709 /*
2710 * Failed to allocate fd or security error, free fds
2711 * installed so far.
2712 */
2713 num_installed_fds = fdi;
2714 for (fdi = 0; fdi < num_installed_fds; fdi++)
2715 task_close_fd(target_proc, fd_array[fdi]);
2716 return target_fd;
2717}
2718
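/*
 * If bp carries BINDER_BUFFER_FLAG_HAS_PARENT, patch the pointer slot
 * at bp->parent_offset inside the (already fixed-up) parent buffer so
 * it holds this buffer's address in the target's address space.
 * binder_validate_fixup() enforces that fixups arrive in order via
 * last_fixup_obj/last_fixup_min_off.
 */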
Martijn Coenen5a6da532016-09-30 14:10:07 +02002719static int binder_fixup_parent(struct binder_transaction *t,
2720 struct binder_thread *thread,
2721 struct binder_buffer_object *bp,
2722 binder_size_t *off_start,
2723 binder_size_t num_valid,
2724 struct binder_buffer_object *last_fixup_obj,
2725 binder_size_t last_fixup_min_off)
2726{
2727 struct binder_buffer_object *parent;
2728 u8 *parent_buffer;
2729 struct binder_buffer *b = t->buffer;
2730 struct binder_proc *proc = thread->proc;
2731 struct binder_proc *target_proc = t->to_proc;
2732
2733 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2734 return 0;
2735
2736 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2737 if (!parent) {
2738 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2739 proc->pid, thread->pid);
2740 return -EINVAL;
2741 }
2742
2743 if (!binder_validate_fixup(b, off_start,
2744 parent, bp->parent_offset,
2745 last_fixup_obj,
2746 last_fixup_min_off)) {
2747 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2748 proc->pid, thread->pid);
2749 return -EINVAL;
2750 }
2751
2752 if (parent->length < sizeof(binder_uintptr_t) ||
2753 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2754 /* No space for a pointer here! */
2755 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2756 proc->pid, thread->pid);
2757 return -EINVAL;
2758 }
Arnd Bergmanne312c3f2017-09-05 10:56:13 +02002759 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002760 binder_alloc_get_user_buffer_offset(
2761 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002762 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2763
2764 return 0;
2765}
2766
Martijn Coenen053be422017-06-06 15:17:46 -07002767/**
2768 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2769 * @t: transaction to send
2770 * @proc: process to send the transaction to
2771 * @thread: thread in @proc to send the transaction to (may be NULL)
2772 *
2773 * This function queues a transaction to the specified process. It will try
2774 * to find a thread in the target process to handle the transaction and
2775 * wake it up. If no thread is found, the work is queued to the proc
2776 * waitqueue.
2777 *
2778 * If the @thread parameter is not NULL, the transaction is always queued
2779 * to the waitlist of that specific thread.
2780 *
 2781 * Return: true if the transaction was successfully queued
2782 * false if the target process or thread is dead
2783 */
2784static bool binder_proc_transaction(struct binder_transaction *t,
2785 struct binder_proc *proc,
2786 struct binder_thread *thread)
2787{
Martijn Coenen053be422017-06-06 15:17:46 -07002788 struct binder_node *node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002789 struct binder_priority node_prio;
Martijn Coenen053be422017-06-06 15:17:46 -07002790 bool oneway = !!(t->flags & TF_ONE_WAY);
Martijn Coenen1af61802017-10-19 15:04:46 +02002791 bool pending_async = false;
Martijn Coenen053be422017-06-06 15:17:46 -07002792
2793 BUG_ON(!node);
2794 binder_node_lock(node);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002795 node_prio.prio = node->min_priority;
2796 node_prio.sched_policy = node->sched_policy;
2797
Martijn Coenen053be422017-06-06 15:17:46 -07002798 if (oneway) {
2799 BUG_ON(thread);
2800 if (node->has_async_transaction) {
Martijn Coenen1af61802017-10-19 15:04:46 +02002801 pending_async = true;
Martijn Coenen053be422017-06-06 15:17:46 -07002802 } else {
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -06002803 node->has_async_transaction = true;
Martijn Coenen053be422017-06-06 15:17:46 -07002804 }
2805 }
2806
2807 binder_inner_proc_lock(proc);
2808
2809 if (proc->is_dead || (thread && thread->is_dead)) {
2810 binder_inner_proc_unlock(proc);
2811 binder_node_unlock(node);
2812 return false;
2813 }
2814
Martijn Coenen1af61802017-10-19 15:04:46 +02002815 if (!thread && !pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002816 thread = binder_select_thread_ilocked(proc);
2817
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002818 if (thread) {
Martijn Coenenc46810c2017-06-23 10:13:43 -07002819 binder_transaction_priority(thread->task, t, node_prio,
2820 node->inherit_rt);
Martijn Coenen1af61802017-10-19 15:04:46 +02002821 binder_enqueue_thread_work_ilocked(thread, &t->work);
2822 } else if (!pending_async) {
2823 binder_enqueue_work_ilocked(&t->work, &proc->todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002824 } else {
Martijn Coenen1af61802017-10-19 15:04:46 +02002825 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002826 }
Martijn Coenen053be422017-06-06 15:17:46 -07002827
Martijn Coenen1af61802017-10-19 15:04:46 +02002828 if (!pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002829 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2830
2831 binder_inner_proc_unlock(proc);
2832 binder_node_unlock(node);
2833
2834 return true;
2835}
2836
Todd Kjos291d9682017-09-25 08:55:09 -07002837/**
2838 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2839 * @node: struct binder_node for which to get refs
2840 * @proc: returns @node->proc if valid
2841 * @error: if no @proc then returns BR_DEAD_REPLY
2842 *
2843 * User-space normally keeps the node alive when creating a transaction
2844 * since it has a reference to the target. The local strong ref keeps it
2845 * alive if the sending process dies before the target process processes
2846 * the transaction. If the source process is malicious or has a reference
2847 * counting bug, relying on the local strong ref can fail.
2848 *
2849 * Since user-space can cause the local strong ref to go away, we also take
2850 * a tmpref on the node to ensure it survives while we are constructing
2851 * the transaction. We also need a tmpref on the proc while we are
2852 * constructing the transaction, so we take that here as well.
2853 *
 2854 * Return: the target_node with refs taken, or NULL if @node->proc is NULL
 2855 * (the target proc has died). On success @procp is also set; on failure
 2856 * @error is set to BR_DEAD_REPLY.
2857 */
2858static struct binder_node *binder_get_node_refs_for_txn(
2859 struct binder_node *node,
2860 struct binder_proc **procp,
2861 uint32_t *error)
2862{
2863 struct binder_node *target_node = NULL;
2864
2865 binder_node_inner_lock(node);
2866 if (node->proc) {
2867 target_node = node;
2868 binder_inc_node_nilocked(node, 1, 0, NULL);
2869 binder_inc_node_tmpref_ilocked(node);
2870 node->proc->tmp_ref++;
2871 *procp = node->proc;
2872 } else
2873 *error = BR_DEAD_REPLY;
2874 binder_node_inner_unlock(node);
2875
2876 return target_node;
2877}
2878
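/*
 * binder_transaction() - core of the driver. Resolves the target
 * proc/thread (or, for a reply, the originating thread), allocates a
 * buffer in the target's address space, copies in the data and offset
 * arrays, translates every embedded object (binder, handle, fd,
 * fd array, scatter-gather pointer), and finally queues the work and
 * wakes the target. Errors unwind through the err_* labels at the
 * bottom and are reported via thread->return_error.
 */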
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002879static void binder_transaction(struct binder_proc *proc,
2880 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002881 struct binder_transaction_data *tr, int reply,
2882 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002883{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002884 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002885 struct binder_transaction *t;
2886 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002887 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002888 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002889 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002890 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002891 struct binder_thread *target_thread = NULL;
2892 struct binder_node *target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002893 struct binder_transaction *in_reply_to = NULL;
2894 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002895 uint32_t return_error = 0;
2896 uint32_t return_error_param = 0;
2897 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002898 struct binder_buffer_object *last_fixup_obj = NULL;
2899 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002900 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002901 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002902
2903 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002904 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002905 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2906 e->from_proc = proc->pid;
2907 e->from_thread = thread->pid;
2908 e->target_handle = tr->target.handle;
2909 e->data_size = tr->data_size;
2910 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002911 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002912
2913 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002914 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002915 in_reply_to = thread->transaction_stack;
2916 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002917 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302918 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002919 proc->pid, thread->pid);
2920 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002921 return_error_param = -EPROTO;
2922 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002923 goto err_empty_call_stack;
2924 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002925 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002926 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302927 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002928 proc->pid, thread->pid, in_reply_to->debug_id,
2929 in_reply_to->to_proc ?
2930 in_reply_to->to_proc->pid : 0,
2931 in_reply_to->to_thread ?
2932 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002933 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002934 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002935 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002936 return_error_param = -EPROTO;
2937 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002938 in_reply_to = NULL;
2939 goto err_bad_call_stack;
2940 }
2941 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002942 binder_inner_proc_unlock(proc);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002943 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002944 if (target_thread == NULL) {
2945 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002946 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002947 goto err_dead_binder;
2948 }
2949 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302950 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002951 proc->pid, thread->pid,
2952 target_thread->transaction_stack ?
2953 target_thread->transaction_stack->debug_id : 0,
2954 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002955 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002956 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002957 return_error_param = -EPROTO;
2958 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002959 in_reply_to = NULL;
2960 target_thread = NULL;
2961 goto err_dead_binder;
2962 }
2963 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002964 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002965 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002966 } else {
2967 if (tr->target.handle) {
2968 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002969
Todd Kjosc37162d2017-05-26 11:56:29 -07002970 /*
 2971 * There must already be a strong ref
 2972 * on this node; take another strong
 2973 * increment on the node to ensure it
 2974 * stays alive until the transaction is
 2975 * done.
2976 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002977 binder_proc_lock(proc);
2978 ref = binder_get_ref_olocked(proc, tr->target.handle,
2979 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002980 if (ref) {
Todd Kjos291d9682017-09-25 08:55:09 -07002981 target_node = binder_get_node_refs_for_txn(
2982 ref->node, &target_proc,
2983 &return_error);
2984 } else {
2985 binder_user_error("%d:%d got transaction to invalid handle\n",
2986 proc->pid, thread->pid);
2987 return_error = BR_FAILED_REPLY;
Todd Kjosc37162d2017-05-26 11:56:29 -07002988 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002989 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002990 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002991 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002992 target_node = context->binder_context_mgr_node;
Todd Kjos291d9682017-09-25 08:55:09 -07002993 if (target_node)
2994 target_node = binder_get_node_refs_for_txn(
2995 target_node, &target_proc,
2996 &return_error);
2997 else
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002998 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002999 mutex_unlock(&context->context_mgr_node_lock);
Martijn Coenenc4048b22018-03-28 11:14:50 +02003000 if (target_node && target_proc == proc) {
3001 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3002 proc->pid, thread->pid);
3003 return_error = BR_FAILED_REPLY;
3004 return_error_param = -EINVAL;
3005 return_error_line = __LINE__;
3006 goto err_invalid_target_handle;
3007 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003008 }
Todd Kjos291d9682017-09-25 08:55:09 -07003009 if (!target_node) {
3010 /*
3011 * return_error is set above
3012 */
3013 return_error_param = -EINVAL;
Todd Kjose598d172017-03-22 17:19:52 -07003014 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003015 goto err_dead_binder;
3016 }
Todd Kjos291d9682017-09-25 08:55:09 -07003017 e->to_node = target_node->debug_id;
Stephen Smalley79af7302015-01-21 10:54:10 -05003018 if (security_binder_transaction(proc->tsk,
3019 target_proc->tsk) < 0) {
3020 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003021 return_error_param = -EPERM;
3022 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05003023 goto err_invalid_target_handle;
3024 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003025 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003026 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3027 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003028
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003029 tmp = thread->transaction_stack;
3030 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003031 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05303032 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003033 proc->pid, thread->pid, tmp->debug_id,
3034 tmp->to_proc ? tmp->to_proc->pid : 0,
3035 tmp->to_thread ?
3036 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07003037 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003038 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003039 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003040 return_error_param = -EPROTO;
3041 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003042 goto err_bad_call_stack;
3043 }
3044 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003045 struct binder_thread *from;
3046
3047 spin_lock(&tmp->lock);
3048 from = tmp->from;
3049 if (from && from->proc == target_proc) {
3050 atomic_inc(&from->tmp_ref);
3051 target_thread = from;
3052 spin_unlock(&tmp->lock);
3053 break;
3054 }
3055 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003056 tmp = tmp->from_parent;
3057 }
3058 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003059 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003060 }
Martijn Coenen053be422017-06-06 15:17:46 -07003061 if (target_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003062 e->to_thread = target_thread->pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003063 e->to_proc = target_proc->pid;
3064
3065 /* TODO: reuse incoming transaction for reply */
3066 t = kzalloc(sizeof(*t), GFP_KERNEL);
3067 if (t == NULL) {
3068 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003069 return_error_param = -ENOMEM;
3070 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003071 goto err_alloc_t_failed;
3072 }
3073 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07003074 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003075
3076 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3077 if (tcomplete == NULL) {
3078 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003079 return_error_param = -ENOMEM;
3080 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003081 goto err_alloc_tcomplete_failed;
3082 }
3083 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3084
Todd Kjos1cfe6272017-05-24 13:33:28 -07003085 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003086
3087 if (reply)
3088 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003089 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003090 proc->pid, thread->pid, t->debug_id,
3091 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003092 (u64)tr->data.ptr.buffer,
3093 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003094 (u64)tr->data_size, (u64)tr->offsets_size,
3095 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003096 else
3097 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003098 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003099 proc->pid, thread->pid, t->debug_id,
3100 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003101 (u64)tr->data.ptr.buffer,
3102 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003103 (u64)tr->data_size, (u64)tr->offsets_size,
3104 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003105
3106 if (!reply && !(tr->flags & TF_ONE_WAY))
3107 t->from = thread;
3108 else
3109 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03003110 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003111 t->to_proc = target_proc;
3112 t->to_thread = target_thread;
3113 t->code = tr->code;
3114 t->flags = tr->flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07003115 if (!(t->flags & TF_ONE_WAY) &&
3116 binder_supported_policy(current->policy)) {
3117 /* Inherit supported policies for synchronous transactions */
3118 t->priority.sched_policy = current->policy;
3119 t->priority.prio = current->normal_prio;
3120 } else {
3121 /* Otherwise, fall back to the default priority */
3122 t->priority = target_proc->default_priority;
3123 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003124
3125 trace_binder_transaction(reply, t, target_node);
3126
Todd Kjosd325d372016-10-10 10:40:53 -07003127 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02003128 tr->offsets_size, extra_buffers_size,
3129 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07003130 if (IS_ERR(t->buffer)) {
3131 /*
3132 * -ESRCH indicates VMA cleared. The target is dying.
3133 */
3134 return_error_param = PTR_ERR(t->buffer);
3135 return_error = return_error_param == -ESRCH ?
3136 BR_DEAD_REPLY : BR_FAILED_REPLY;
3137 return_error_line = __LINE__;
3138 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003139 goto err_binder_alloc_buf_failed;
3140 }
3141 t->buffer->allow_user_free = 0;
3142 t->buffer->debug_id = t->debug_id;
3143 t->buffer->transaction = t;
3144 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003145 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003146 off_start = (binder_size_t *)(t->buffer->data +
3147 ALIGN(tr->data_size, sizeof(void *)));
3148 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003149
Arve Hjønnevågda498892014-02-21 14:40:26 -08003150 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3151 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303152 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3153 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003154 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003155 return_error_param = -EFAULT;
3156 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003157 goto err_copy_data_failed;
3158 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003159 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3160 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303161 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3162 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003163 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003164 return_error_param = -EFAULT;
3165 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003166 goto err_copy_data_failed;
3167 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003168 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3169 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3170 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003171 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003172 return_error_param = -EINVAL;
3173 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003174 goto err_bad_offset;
3175 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02003176 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3177 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3178 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05303179 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003180 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003181 return_error_param = -EINVAL;
3182 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003183 goto err_bad_offset;
3184 }
3185 off_end = (void *)off_start + tr->offsets_size;
3186 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3187 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003188 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003189 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02003190 struct binder_object_header *hdr;
3191 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09003192
Martijn Coenen00c80372016-07-13 12:06:49 +02003193 if (object_size == 0 || *offp < off_min) {
3194 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003195 proc->pid, thread->pid, (u64)*offp,
3196 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02003197 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003198 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003199 return_error_param = -EINVAL;
3200 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003201 goto err_bad_offset;
3202 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003203
3204 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3205 off_min = *offp + object_size;
3206 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003207 case BINDER_TYPE_BINDER:
3208 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003209 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003210
Martijn Coenen00c80372016-07-13 12:06:49 +02003211 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003212 ret = binder_translate_binder(fp, t, thread);
3213 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02003214 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003215 return_error_param = ret;
3216 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003217 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003218 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003219 } break;
3220 case BINDER_TYPE_HANDLE:
3221 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003222 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003223
Martijn Coenen00c80372016-07-13 12:06:49 +02003224 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003225 ret = binder_translate_handle(fp, t, thread);
3226 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003227 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003228 return_error_param = ret;
3229 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003230 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003231 }
3232 } break;
3233
3234 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003235 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003236 int target_fd = binder_translate_fd(fp->fd, t, thread,
3237 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003238
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003239 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003240 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003241 return_error_param = target_fd;
3242 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003243 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003244 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003245 fp->pad_binder = 0;
3246 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003247 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003248 case BINDER_TYPE_FDA: {
3249 struct binder_fd_array_object *fda =
3250 to_binder_fd_array_object(hdr);
3251 struct binder_buffer_object *parent =
3252 binder_validate_ptr(t->buffer, fda->parent,
3253 off_start,
3254 offp - off_start);
3255 if (!parent) {
3256 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3257 proc->pid, thread->pid);
3258 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003259 return_error_param = -EINVAL;
3260 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003261 goto err_bad_parent;
3262 }
3263 if (!binder_validate_fixup(t->buffer, off_start,
3264 parent, fda->parent_offset,
3265 last_fixup_obj,
3266 last_fixup_min_off)) {
3267 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3268 proc->pid, thread->pid);
3269 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003270 return_error_param = -EINVAL;
3271 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003272 goto err_bad_parent;
3273 }
3274 ret = binder_translate_fd_array(fda, parent, t, thread,
3275 in_reply_to);
3276 if (ret < 0) {
3277 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003278 return_error_param = ret;
3279 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003280 goto err_translate_failed;
3281 }
3282 last_fixup_obj = parent;
3283 last_fixup_min_off =
3284 fda->parent_offset + sizeof(u32) * fda->num_fds;
3285 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003286 case BINDER_TYPE_PTR: {
3287 struct binder_buffer_object *bp =
3288 to_binder_buffer_object(hdr);
3289 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003290
Martijn Coenen5a6da532016-09-30 14:10:07 +02003291 if (bp->length > buf_left) {
3292 binder_user_error("%d:%d got transaction with too large buffer\n",
3293 proc->pid, thread->pid);
3294 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003295 return_error_param = -EINVAL;
3296 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003297 goto err_bad_offset;
3298 }
3299 if (copy_from_user(sg_bufp,
3300 (const void __user *)(uintptr_t)
3301 bp->buffer, bp->length)) {
 3302 binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
3303 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07003304 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003305 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003306 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003307 goto err_copy_data_failed;
3308 }
3309 /* Fixup buffer pointer to target proc address space */
3310 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07003311 binder_alloc_get_user_buffer_offset(
3312 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003313 sg_bufp += ALIGN(bp->length, sizeof(u64));
3314
3315 ret = binder_fixup_parent(t, thread, bp, off_start,
3316 offp - off_start,
3317 last_fixup_obj,
3318 last_fixup_min_off);
3319 if (ret < 0) {
3320 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003321 return_error_param = ret;
3322 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003323 goto err_translate_failed;
3324 }
3325 last_fixup_obj = bp;
3326 last_fixup_min_off = 0;
3327 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003328 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01003329 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02003330 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003331 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003332 return_error_param = -EINVAL;
3333 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003334 goto err_bad_object_type;
3335 }
3336 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003337 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003338 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003339
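	/*
	 * Past this point the transaction is fully constructed; the only
	 * remaining failure is a dead target (err_dead_proc_or_thread).
	 * Three queueing strategies follow: reply to the originating
	 * thread, synchronous call (with the TRANSACTION_COMPLETE
	 * deferred, see below), or one-way async.
	 */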
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003340 if (reply) {
Martijn Coenen1af61802017-10-19 15:04:46 +02003341 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003342 binder_inner_proc_lock(target_proc);
3343 if (target_thread->is_dead) {
3344 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003345 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003346 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003347 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003348 binder_pop_transaction_ilocked(target_thread, in_reply_to);
Martijn Coenen1af61802017-10-19 15:04:46 +02003349 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003350 binder_inner_proc_unlock(target_proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003351 wake_up_interruptible_sync(&target_thread->wait);
Martijn Coenenecd972d2017-05-26 10:48:56 -07003352 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003353 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003354 } else if (!(t->flags & TF_ONE_WAY)) {
3355 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003356 binder_inner_proc_lock(proc);
Martijn Coenendac2e9c2017-11-13 09:55:21 +01003357 /*
3358 * Defer the TRANSACTION_COMPLETE, so we don't return to
3359 * userspace immediately; this allows the target process to
3360 * immediately start processing this transaction, reducing
3361 * latency. We will then return the TRANSACTION_COMPLETE when
3362 * the target replies (or there is an error).
3363 */
3364 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003365 t->need_reply = 1;
3366 t->from_parent = thread->transaction_stack;
3367 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003368 binder_inner_proc_unlock(proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003369 if (!binder_proc_transaction(t, target_proc, target_thread)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003370 binder_inner_proc_lock(proc);
3371 binder_pop_transaction_ilocked(thread, t);
3372 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003373 goto err_dead_proc_or_thread;
3374 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003375 } else {
3376 BUG_ON(target_node == NULL);
3377 BUG_ON(t->buffer->async_transaction != 1);
Martijn Coenen1af61802017-10-19 15:04:46 +02003378 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen053be422017-06-06 15:17:46 -07003379 if (!binder_proc_transaction(t, target_proc, NULL))
Todd Kjos2f993e22017-05-12 14:42:55 -07003380 goto err_dead_proc_or_thread;
Riley Andrewsb5968812015-09-01 12:42:07 -07003381 }
Todd Kjos2f993e22017-05-12 14:42:55 -07003382 if (target_thread)
3383 binder_thread_dec_tmpref(target_thread);
3384 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003385 if (target_node)
3386 binder_dec_node_tmpref(target_node);
Todd Kjos1cfe6272017-05-24 13:33:28 -07003387 /*
3388 * write barrier to synchronize with initialization
3389 * of log entry
3390 */
3391 smp_wmb();
3392 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003393 return;
3394
Todd Kjos2f993e22017-05-12 14:42:55 -07003395err_dead_proc_or_thread:
3396 return_error = BR_DEAD_REPLY;
3397 return_error_line = __LINE__;
Xu YiPing86578a02017-05-22 11:26:23 -07003398 binder_dequeue_work(proc, tcomplete);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003399err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003400err_bad_object_type:
3401err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003402err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003403err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003404 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003405 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjos291d9682017-09-25 08:55:09 -07003406 if (target_node)
3407 binder_dec_node_tmpref(target_node);
Todd Kjosc37162d2017-05-26 11:56:29 -07003408 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003409 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07003410 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003411err_binder_alloc_buf_failed:
3412 kfree(tcomplete);
3413 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3414err_alloc_tcomplete_failed:
3415 kfree(t);
3416 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3417err_alloc_t_failed:
3418err_bad_call_stack:
3419err_empty_call_stack:
3420err_dead_binder:
3421err_invalid_target_handle:
Todd Kjos2f993e22017-05-12 14:42:55 -07003422 if (target_thread)
3423 binder_thread_dec_tmpref(target_thread);
3424 if (target_proc)
3425 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003426 if (target_node) {
Todd Kjosc37162d2017-05-26 11:56:29 -07003427 binder_dec_node(target_node, 1, 0);
Todd Kjos291d9682017-09-25 08:55:09 -07003428 binder_dec_node_tmpref(target_node);
3429 }
Todd Kjosc37162d2017-05-26 11:56:29 -07003430
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003431 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07003432 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3433 proc->pid, thread->pid, return_error, return_error_param,
3434 (u64)tr->data_size, (u64)tr->offsets_size,
3435 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003436
3437 {
3438 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09003439
Todd Kjose598d172017-03-22 17:19:52 -07003440 e->return_error = return_error;
3441 e->return_error_param = return_error_param;
3442 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003443 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3444 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07003445 /*
3446 * write barrier to synchronize with initialization
3447 * of log entry
3448 */
3449 smp_wmb();
3450 WRITE_ONCE(e->debug_id_done, t_debug_id);
3451 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003452 }
3453
Todd Kjos858b8da2017-04-21 17:35:12 -07003454 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003455 if (in_reply_to) {
Martijn Coenenecd972d2017-05-26 10:48:56 -07003456 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos858b8da2017-04-21 17:35:12 -07003457 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Martijn Coenen1af61802017-10-19 15:04:46 +02003458 binder_enqueue_thread_work(thread, &thread->return_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003459 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07003460 } else {
3461 thread->return_error.cmd = return_error;
Martijn Coenen1af61802017-10-19 15:04:46 +02003462 binder_enqueue_thread_work(thread, &thread->return_error.work);
Todd Kjos858b8da2017-04-21 17:35:12 -07003463 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003464}
3465
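/*
 * Process BC_* commands from the userspace write buffer: a stream of
 * 32-bit command words, each followed by that command's fixed-size
 * arguments. An illustrative (hypothetical) userspace encoding of a
 * single command:
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;	// takes no arguments
 *	memcpy(buf + used, &cmd, sizeof(cmd));
 *	used += sizeof(cmd);
 *
 * Parsing stops at end of buffer or on a pending return error.
 */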
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003466static int binder_thread_write(struct binder_proc *proc,
3467 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003468 binder_uintptr_t binder_buffer, size_t size,
3469 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003470{
3471 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003472 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003473 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003474 void __user *ptr = buffer + *consumed;
3475 void __user *end = buffer + size;
3476
Todd Kjos858b8da2017-04-21 17:35:12 -07003477 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003478 int ret;
3479
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003480 if (get_user(cmd, (uint32_t __user *)ptr))
3481 return -EFAULT;
3482 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003483 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003484 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003485 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3486 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3487 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003488 }
3489 switch (cmd) {
3490 case BC_INCREFS:
3491 case BC_ACQUIRE:
3492 case BC_RELEASE:
3493 case BC_DECREFS: {
3494 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003495 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003496 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3497 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3498 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003499
3500 if (get_user(target, (uint32_t __user *)ptr))
3501 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003502
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003503 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003504 ret = -1;
3505 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003506 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003507 mutex_lock(&context->context_mgr_node_lock);
3508 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003509 if (ctx_mgr_node)
3510 ret = binder_inc_ref_for_node(
3511 proc, ctx_mgr_node,
3512 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003513 mutex_unlock(&context->context_mgr_node_lock);
3514 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003515 if (ret)
3516 ret = binder_update_ref_for_handle(
3517 proc, target, increment, strong,
3518 &rdata);
3519 if (!ret && rdata.desc != target) {
3520 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3521 proc->pid, thread->pid,
3522 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003523 }
3524 switch (cmd) {
3525 case BC_INCREFS:
3526 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003527 break;
3528 case BC_ACQUIRE:
3529 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003530 break;
3531 case BC_RELEASE:
3532 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003533 break;
3534 case BC_DECREFS:
3535 default:
3536 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003537 break;
3538 }
3539 if (ret) {
3540 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3541 proc->pid, thread->pid, debug_string,
3542 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003543 break;
3544 }
3545 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003546 "%d:%d %s ref %d desc %d s %d w %d\n",
3547 proc->pid, thread->pid, debug_string,
3548 rdata.debug_id, rdata.desc, rdata.strong,
3549 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003550 break;
3551 }
3552 case BC_INCREFS_DONE:
3553 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003554 binder_uintptr_t node_ptr;
3555 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003556 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003557 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003558
Arve Hjønnevågda498892014-02-21 14:40:26 -08003559 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003560 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003561 ptr += sizeof(binder_uintptr_t);
3562 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003563 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003564 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003565 node = binder_get_node(proc, node_ptr);
3566 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003567 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003568 proc->pid, thread->pid,
3569 cmd == BC_INCREFS_DONE ?
3570 "BC_INCREFS_DONE" :
3571 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003572 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003573 break;
3574 }
3575 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003576 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003577 proc->pid, thread->pid,
3578 cmd == BC_INCREFS_DONE ?
3579 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003580 (u64)node_ptr, node->debug_id,
3581 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003582 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003583 break;
3584 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003585 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003586 if (cmd == BC_ACQUIRE_DONE) {
3587 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303588 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003589 proc->pid, thread->pid,
3590 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003591 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003592 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003593 break;
3594 }
3595 node->pending_strong_ref = 0;
3596 } else {
3597 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303598 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003599 proc->pid, thread->pid,
3600 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003601 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003602 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003603 break;
3604 }
3605 node->pending_weak_ref = 0;
3606 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003607 free_node = binder_dec_node_nilocked(node,
3608 cmd == BC_ACQUIRE_DONE, 0);
3609 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003610 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003611 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003612 proc->pid, thread->pid,
3613 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003614 node->debug_id, node->local_strong_refs,
3615 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003616 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003617 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003618 break;
3619 }
3620 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303621 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003622 return -EINVAL;
3623 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303624 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003625 return -EINVAL;
3626
3627 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003628 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003629 struct binder_buffer *buffer;
3630
Arve Hjønnevågda498892014-02-21 14:40:26 -08003631 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003632 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003633 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003634
Todd Kjos076072a2017-04-21 14:32:11 -07003635 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3636 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003637 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003638 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3639 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003640 break;
3641 }
3642 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003643 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3644 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003645 break;
3646 }
3647 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003648 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3649 proc->pid, thread->pid, (u64)data_ptr,
3650 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003651 buffer->transaction ? "active" : "finished");
3652
3653 if (buffer->transaction) {
3654 buffer->transaction->buffer = NULL;
3655 buffer->transaction = NULL;
3656 }
3657 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003658 struct binder_node *buf_node;
3659 struct binder_work *w;
3660
3661 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003662 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003663 BUG_ON(!buf_node->has_async_transaction);
3664 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003665 w = binder_dequeue_work_head_ilocked(
3666 &buf_node->async_todo);
Martijn Coenen4501c042017-08-10 13:56:16 +02003667 if (!w) {
Gustavo A. R. Silvae62dd6f2018-01-23 12:04:27 -06003668 buf_node->has_async_transaction = false;
Martijn Coenen4501c042017-08-10 13:56:16 +02003669 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003670 binder_enqueue_work_ilocked(
Martijn Coenen4501c042017-08-10 13:56:16 +02003671 w, &proc->todo);
3672 binder_wakeup_proc_ilocked(proc);
3673 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003674 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003675 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003676 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003677 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003678 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003679 break;
3680 }
3681
Martijn Coenen5a6da532016-09-30 14:10:07 +02003682 case BC_TRANSACTION_SG:
3683 case BC_REPLY_SG: {
3684 struct binder_transaction_data_sg tr;
3685
3686 if (copy_from_user(&tr, ptr, sizeof(tr)))
3687 return -EFAULT;
3688 ptr += sizeof(tr);
3689 binder_transaction(proc, thread, &tr.transaction_data,
3690 cmd == BC_REPLY_SG, tr.buffers_size);
3691 break;
3692 }
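		/*
		 * Hypothetical userspace sketch for the SG variant (field
		 * names from binder_transaction_data_sg; buffer assembly
		 * is illustrative):
		 *
		 *	struct binder_transaction_data_sg sg;
		 *	sg.transaction_data = td;	// usual txn fields
		 *	sg.buffers_size = sg_total;	// extra buffer space
		 *	// write BC_TRANSACTION_SG followed by sg via
		 *	// the BINDER_WRITE_READ ioctl
		 */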
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003693 case BC_TRANSACTION:
3694 case BC_REPLY: {
3695 struct binder_transaction_data tr;
3696
3697 if (copy_from_user(&tr, ptr, sizeof(tr)))
3698 return -EFAULT;
3699 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003700 binder_transaction(proc, thread, &tr,
3701 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003702 break;
3703 }
3704
3705 case BC_REGISTER_LOOPER:
3706 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303707 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003708 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003709 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003710 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3711 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303712 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003713 proc->pid, thread->pid);
3714 } else if (proc->requested_threads == 0) {
3715 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303716 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003717 proc->pid, thread->pid);
3718 } else {
3719 proc->requested_threads--;
3720 proc->requested_threads_started++;
3721 }
3722 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003723 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003724 break;
3725 case BC_ENTER_LOOPER:
3726 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303727 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003728 proc->pid, thread->pid);
3729 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3730 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303731 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003732 proc->pid, thread->pid);
3733 }
3734 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3735 break;
3736 case BC_EXIT_LOOPER:
3737 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303738 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003739 proc->pid, thread->pid);
3740 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3741 break;
3742
3743 case BC_REQUEST_DEATH_NOTIFICATION:
3744 case BC_CLEAR_DEATH_NOTIFICATION: {
3745 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003746 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003747 struct binder_ref *ref;
Todd Kjos5346bf32016-10-20 16:43:34 -07003748 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003749
3750 if (get_user(target, (uint32_t __user *)ptr))
3751 return -EFAULT;
3752 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003753 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003754 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003755 ptr += sizeof(binder_uintptr_t);
Todd Kjos5346bf32016-10-20 16:43:34 -07003756 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3757 /*
3758 * Allocate memory for death notification
3759 * before taking lock
3760 */
3761 death = kzalloc(sizeof(*death), GFP_KERNEL);
3762 if (death == NULL) {
3763 WARN_ON(thread->return_error.cmd !=
3764 BR_OK);
3765 thread->return_error.cmd = BR_ERROR;
Martijn Coenen1af61802017-10-19 15:04:46 +02003766 binder_enqueue_thread_work(
3767 thread,
3768 &thread->return_error.work);
Todd Kjos5346bf32016-10-20 16:43:34 -07003769 binder_debug(
3770 BINDER_DEBUG_FAILED_TRANSACTION,
3771 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3772 proc->pid, thread->pid);
3773 break;
3774 }
3775 }
3776 binder_proc_lock(proc);
3777 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003778 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303779 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003780 proc->pid, thread->pid,
3781 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3782 "BC_REQUEST_DEATH_NOTIFICATION" :
3783 "BC_CLEAR_DEATH_NOTIFICATION",
3784 target);
Todd Kjos5346bf32016-10-20 16:43:34 -07003785 binder_proc_unlock(proc);
3786 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003787 break;
3788 }
3789
3790 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003791 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003792 proc->pid, thread->pid,
3793 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3794 "BC_REQUEST_DEATH_NOTIFICATION" :
3795 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003796 (u64)cookie, ref->data.debug_id,
3797 ref->data.desc, ref->data.strong,
3798 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003799
Martijn Coenenf9eac642017-05-22 11:26:23 -07003800 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003801 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3802 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303803 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003804 proc->pid, thread->pid);
Martijn Coenenf9eac642017-05-22 11:26:23 -07003805 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003806 binder_proc_unlock(proc);
3807 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003808 break;
3809 }
3810 binder_stats_created(BINDER_STAT_DEATH);
3811 INIT_LIST_HEAD(&death->work.entry);
3812 death->cookie = cookie;
3813 ref->death = death;
3814 if (ref->node->proc == NULL) {
3815 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Martijn Coenen3bdbe4c2017-08-10 13:50:52 +02003816
3817 binder_inner_proc_lock(proc);
3818 binder_enqueue_work_ilocked(
3819 &ref->death->work, &proc->todo);
3820 binder_wakeup_proc_ilocked(proc);
3821 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003822 }
3823 } else {
3824 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303825 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003826 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003827 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003828 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003829 break;
3830 }
3831 death = ref->death;
3832 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003833 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003834 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003835 (u64)death->cookie,
3836 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003837 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003838 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003839 break;
3840 }
3841 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003842 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003843 if (list_empty(&death->work.entry)) {
3844 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003845 if (thread->looper &
3846 (BINDER_LOOPER_STATE_REGISTERED |
3847 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003848 binder_enqueue_thread_work_ilocked(
3849 thread,
3850 &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003851 else {
3852 binder_enqueue_work_ilocked(
3853 &death->work,
3854 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003855 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003856 proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003857 }
3858 } else {
3859 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3860 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3861 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003862 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003863 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07003864 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003865 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003866 } break;
3867 case BC_DEAD_BINDER_DONE: {
3868 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003869 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003870 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003871
Arve Hjønnevågda498892014-02-21 14:40:26 -08003872 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003873 return -EFAULT;
3874
Lisa Du7a64cd82016-02-17 09:32:52 +08003875 ptr += sizeof(cookie);
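		/* match the cookie against death notifications delivered earlier */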
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003876 binder_inner_proc_lock(proc);
3877 list_for_each_entry(w, &proc->delivered_death,
3878 entry) {
3879 struct binder_ref_death *tmp_death =
3880 container_of(w,
3881 struct binder_ref_death,
3882 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003883
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003884 if (tmp_death->cookie == cookie) {
3885 death = tmp_death;
3886 break;
3887 }
3888 }
3889 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003890 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3891 proc->pid, thread->pid, (u64)cookie,
3892 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003893 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003894 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3895 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003896 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003897 break;
3898 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003899 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003900 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3901 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003902 if (thread->looper &
3903 (BINDER_LOOPER_STATE_REGISTERED |
3904 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003905 binder_enqueue_thread_work_ilocked(
3906 thread, &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003907 else {
3908 binder_enqueue_work_ilocked(
3909 &death->work,
3910 &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07003911 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003912 }
3913 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003914 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003915 } break;
3916
3917 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303918 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003919 proc->pid, thread->pid, cmd);
3920 return -EINVAL;
3921 }
3922 *consumed = ptr - buffer;
3923 }
3924 return 0;
3925}
3926
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003927static void binder_stat_br(struct binder_proc *proc,
3928 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003929{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003930 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003931 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003932 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3933 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3934 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003935 }
3936}
3937
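/*
 * Emit a single BR_* node command (cmd word plus node ptr and cookie)
 * into the user-space read buffer, advancing *ptrp on success.
 */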
Todd Kjos60792612017-05-24 10:51:01 -07003938static int binder_put_node_cmd(struct binder_proc *proc,
3939 struct binder_thread *thread,
3940 void __user **ptrp,
3941 binder_uintptr_t node_ptr,
3942 binder_uintptr_t node_cookie,
3943 int node_debug_id,
3944 uint32_t cmd, const char *cmd_name)
3945{
3946 void __user *ptr = *ptrp;
3947
3948 if (put_user(cmd, (uint32_t __user *)ptr))
3949 return -EFAULT;
3950 ptr += sizeof(uint32_t);
3951
3952 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3953 return -EFAULT;
3954 ptr += sizeof(binder_uintptr_t);
3955
3956 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3957 return -EFAULT;
3958 ptr += sizeof(binder_uintptr_t);
3959
3960 binder_stat_br(proc, thread, cmd);
3961 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3962 proc->pid, thread->pid, cmd_name, node_debug_id,
3963 (u64)node_ptr, (u64)node_cookie);
3964
3965 *ptrp = ptr;
3966 return 0;
3967}
3968
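/*
 * Block until this thread has work or, when do_proc_work is set, until
 * the process has work; a thread willing to handle process work parks
 * itself on proc->waiting_threads while it sleeps.
 */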
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003969static int binder_wait_for_work(struct binder_thread *thread,
3970 bool do_proc_work)
3971{
3972 DEFINE_WAIT(wait);
3973 struct binder_proc *proc = thread->proc;
3974 int ret = 0;
3975
3976 freezer_do_not_count();
3977 binder_inner_proc_lock(proc);
3978 for (;;) {
3979 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3980 if (binder_has_work_ilocked(thread, do_proc_work))
3981 break;
3982 if (do_proc_work)
3983 list_add(&thread->waiting_thread_node,
3984 &proc->waiting_threads);
3985 binder_inner_proc_unlock(proc);
3986 schedule();
3987 binder_inner_proc_lock(proc);
3988 list_del_init(&thread->waiting_thread_node);
3989 if (signal_pending(current)) {
3990 ret = -ERESTARTSYS;
3991 break;
3992 }
3993 }
3994 finish_wait(&thread->wait, &wait);
3995 binder_inner_proc_unlock(proc);
3996 freezer_count();
3997
3998 return ret;
3999}
4000
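/*
 * Drain work into the user-space read buffer, taking items from the
 * thread's todo list and, when the thread is available for process
 * work, from proc->todo; blocks unless non_block is set.
 */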
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004001static int binder_thread_read(struct binder_proc *proc,
4002 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004003 binder_uintptr_t binder_buffer, size_t size,
4004 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004005{
Arve Hjønnevågda498892014-02-21 14:40:26 -08004006 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004007 void __user *ptr = buffer + *consumed;
4008 void __user *end = buffer + size;
4009
4010 int ret = 0;
4011 int wait_for_proc_work;
4012
4013 if (*consumed == 0) {
4014 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4015 return -EFAULT;
4016 ptr += sizeof(uint32_t);
4017 }
4018
4019retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07004020 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004021 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07004022 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004023
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004024 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004025
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004026 trace_binder_wait_for_work(wait_for_proc_work,
4027 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004028 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004029 if (wait_for_proc_work) {
4030 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4031 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304032 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004033 proc->pid, thread->pid, thread->looper);
4034 wait_event_interruptible(binder_user_error_wait,
4035 binder_stop_on_user_error < 2);
4036 }
Martijn Coenenecd972d2017-05-26 10:48:56 -07004037 binder_restore_priority(current, proc->default_priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004038 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004039
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004040 if (non_block) {
4041 if (!binder_has_work(thread, wait_for_proc_work))
4042 ret = -EAGAIN;
4043 } else {
4044 ret = binder_wait_for_work(thread, wait_for_proc_work);
4045 }
4046
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004047 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4048
4049 if (ret)
4050 return ret;
4051
4052 while (1) {
4053 uint32_t cmd;
4054 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004055 struct binder_work *w = NULL;
4056 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004057 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07004058 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004059
Todd Kjose7f23ed2017-03-21 13:06:01 -07004060 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004061 if (!binder_worklist_empty_ilocked(&thread->todo))
4062 list = &thread->todo;
4063 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4064 wait_for_proc_work)
4065 list = &proc->todo;
4066 else {
4067 binder_inner_proc_unlock(proc);
4068
Dmitry Voytik395262a2014-09-08 18:16:34 +04004069 /* no data added */
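			/* ptr - buffer == 4: only the initial BR_NOOP was written */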
Todd Kjos6798e6d2017-01-06 14:19:25 -08004070 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004071 goto retry;
4072 break;
4073 }
4074
Todd Kjose7f23ed2017-03-21 13:06:01 -07004075 if (end - ptr < sizeof(tr) + 4) {
4076 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004077 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004078 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004079 w = binder_dequeue_work_head_ilocked(list);
Martijn Coenen1af61802017-10-19 15:04:46 +02004080 if (binder_worklist_empty_ilocked(&thread->todo))
4081 thread->process_todo = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004082
4083 switch (w->type) {
4084 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004085 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004086 t = container_of(w, struct binder_transaction, work);
4087 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004088 case BINDER_WORK_RETURN_ERROR: {
4089 struct binder_error *e = container_of(
4090 w, struct binder_error, work);
4091
4092 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004093 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07004094 if (put_user(e->cmd, (uint32_t __user *)ptr))
4095 return -EFAULT;
宋金时e1b1a8b2018-05-10 02:05:03 +00004096 cmd = e->cmd;
Todd Kjos858b8da2017-04-21 17:35:12 -07004097 e->cmd = BR_OK;
4098 ptr += sizeof(uint32_t);
4099
4100 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07004101 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004102 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004103 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004104 cmd = BR_TRANSACTION_COMPLETE;
4105 if (put_user(cmd, (uint32_t __user *)ptr))
4106 return -EFAULT;
4107 ptr += sizeof(uint32_t);
4108
4109 binder_stat_br(proc, thread, cmd);
4110 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304111 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004112 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004113 kfree(w);
4114 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4115 } break;
4116 case BINDER_WORK_NODE: {
4117 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07004118 int strong, weak;
4119 binder_uintptr_t node_ptr = node->ptr;
4120 binder_uintptr_t node_cookie = node->cookie;
4121 int node_debug_id = node->debug_id;
4122 int has_weak_ref;
4123 int has_strong_ref;
4124 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09004125
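			/*
			 * Work out the desired ref state while still holding
			 * the inner lock; the BR_* commands are written to the
			 * user buffer only after the lock is dropped.
			 */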
Todd Kjos60792612017-05-24 10:51:01 -07004126 BUG_ON(proc != node->proc);
4127 strong = node->internal_strong_refs ||
4128 node->local_strong_refs;
4129 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07004130 node->local_weak_refs ||
4131 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07004132 has_strong_ref = node->has_strong_ref;
4133 has_weak_ref = node->has_weak_ref;
4134
4135 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004136 node->has_weak_ref = 1;
4137 node->pending_weak_ref = 1;
4138 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004139 }
4140 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004141 node->has_strong_ref = 1;
4142 node->pending_strong_ref = 1;
4143 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004144 }
4145 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004146 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004147 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004148 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004149 if (!weak && !strong) {
4150 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4151 "%d:%d node %d u%016llx c%016llx deleted\n",
4152 proc->pid, thread->pid,
4153 node_debug_id,
4154 (u64)node_ptr,
4155 (u64)node_cookie);
4156 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004157 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004158 binder_node_lock(node);
4159 /*
4160 * Acquire the node lock before freeing the
4161 * node to serialize with other threads that
4162 * may have been holding the node lock while
4163 * decrementing this node (avoids race where
4164 * this thread frees while the other thread
4165 * is unlocking the node after the final
4166 * decrement)
4167 */
4168 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004169 binder_free_node(node);
4170 } else
4171 binder_inner_proc_unlock(proc);
4172
Todd Kjos60792612017-05-24 10:51:01 -07004173 if (weak && !has_weak_ref)
4174 ret = binder_put_node_cmd(
4175 proc, thread, &ptr, node_ptr,
4176 node_cookie, node_debug_id,
4177 BR_INCREFS, "BR_INCREFS");
4178 if (!ret && strong && !has_strong_ref)
4179 ret = binder_put_node_cmd(
4180 proc, thread, &ptr, node_ptr,
4181 node_cookie, node_debug_id,
4182 BR_ACQUIRE, "BR_ACQUIRE");
4183 if (!ret && !strong && has_strong_ref)
4184 ret = binder_put_node_cmd(
4185 proc, thread, &ptr, node_ptr,
4186 node_cookie, node_debug_id,
4187 BR_RELEASE, "BR_RELEASE");
4188 if (!ret && !weak && has_weak_ref)
4189 ret = binder_put_node_cmd(
4190 proc, thread, &ptr, node_ptr,
4191 node_cookie, node_debug_id,
4192 BR_DECREFS, "BR_DECREFS");
4193 if (orig_ptr == ptr)
4194 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4195 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4196 proc->pid, thread->pid,
4197 node_debug_id,
4198 (u64)node_ptr,
4199 (u64)node_cookie);
4200 if (ret)
4201 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004202 } break;
4203 case BINDER_WORK_DEAD_BINDER:
4204 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4205 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4206 struct binder_ref_death *death;
4207 uint32_t cmd;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004208 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004209
4210 death = container_of(w, struct binder_ref_death, work);
4211 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4212 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4213 else
4214 cmd = BR_DEAD_BINDER;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004215 cookie = death->cookie;
4216
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004217 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004218 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004219 proc->pid, thread->pid,
4220 cmd == BR_DEAD_BINDER ?
4221 "BR_DEAD_BINDER" :
4222 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Martijn Coenenf9eac642017-05-22 11:26:23 -07004223 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004224 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Martijn Coenenf9eac642017-05-22 11:26:23 -07004225 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004226 kfree(death);
4227 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004228 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004229 binder_enqueue_work_ilocked(
4230 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004231 binder_inner_proc_unlock(proc);
4232 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07004233 if (put_user(cmd, (uint32_t __user *)ptr))
4234 return -EFAULT;
4235 ptr += sizeof(uint32_t);
4236 if (put_user(cookie,
4237 (binder_uintptr_t __user *)ptr))
4238 return -EFAULT;
4239 ptr += sizeof(binder_uintptr_t);
4240 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004241 if (cmd == BR_DEAD_BINDER)
4242 goto done; /* DEAD_BINDER notifications can cause transactions */
4243 } break;
4244 }
4245
4246 if (!t)
4247 continue;
4248
4249 BUG_ON(t->buffer == NULL);
4250 if (t->buffer->target_node) {
4251 struct binder_node *target_node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004252 struct binder_priority node_prio;
Seunghun Lee10f62862014-05-01 01:30:23 +09004253
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004254 tr.target.ptr = target_node->ptr;
4255 tr.cookie = target_node->cookie;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004256 node_prio.sched_policy = target_node->sched_policy;
4257 node_prio.prio = target_node->min_priority;
Martijn Coenenc46810c2017-06-23 10:13:43 -07004258 binder_transaction_priority(current, t, node_prio,
4259 target_node->inherit_rt);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004260 cmd = BR_TRANSACTION;
4261 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08004262 tr.target.ptr = 0;
4263 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004264 cmd = BR_REPLY;
4265 }
4266 tr.code = t->code;
4267 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06004268 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004269
Todd Kjos2f993e22017-05-12 14:42:55 -07004270 t_from = binder_get_txn_from(t);
4271 if (t_from) {
4272 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09004273
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004274 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08004275 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004276 } else {
4277 tr.sender_pid = 0;
4278 }
4279
4280 tr.data_size = t->buffer->data_size;
4281 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07004282 tr.data.ptr.buffer = (binder_uintptr_t)
4283 ((uintptr_t)t->buffer->data +
4284 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004285 tr.data.ptr.offsets = tr.data.ptr.buffer +
4286 ALIGN(t->buffer->data_size,
4287 sizeof(void *));
4288
Todd Kjos2f993e22017-05-12 14:42:55 -07004289 if (put_user(cmd, (uint32_t __user *)ptr)) {
4290 if (t_from)
4291 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004292
4293 binder_cleanup_transaction(t, "put_user failed",
4294 BR_FAILED_REPLY);
4295
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004296 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004297 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004298 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07004299 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4300 if (t_from)
4301 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004302
4303 binder_cleanup_transaction(t, "copy_to_user failed",
4304 BR_FAILED_REPLY);
4305
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004306 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004307 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004308 ptr += sizeof(tr);
4309
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004310 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004311 binder_stat_br(proc, thread, cmd);
4312 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004313 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004314 proc->pid, thread->pid,
4315 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4316 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07004317 t->debug_id, t_from ? t_from->proc->pid : 0,
4318 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004319 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004320 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004321
Todd Kjos2f993e22017-05-12 14:42:55 -07004322 if (t_from)
4323 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004324 t->buffer->allow_user_free = 1;
4325 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07004326 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004327 t->to_parent = thread->transaction_stack;
4328 t->to_thread = thread;
4329 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07004330 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004331 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07004332 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004333 }
4334 break;
4335 }
4336
4337done:
4338
4339 *consumed = ptr - buffer;
Todd Kjosd600e902017-05-25 17:35:02 -07004340 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004341 if (proc->requested_threads == 0 &&
4342 list_empty(&thread->proc->waiting_threads) &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004343 proc->requested_threads_started < proc->max_threads &&
4344 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4345 BINDER_LOOPER_STATE_ENTERED))
4346 /* the user-space code fails to spawn a new thread if we leave this out */) {
4347 proc->requested_threads++;
Todd Kjosd600e902017-05-25 17:35:02 -07004348 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004349 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304350 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004351 proc->pid, thread->pid);
4352 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4353 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07004354 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Todd Kjosd600e902017-05-25 17:35:02 -07004355 } else
4356 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004357 return 0;
4358}
4359
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004360static void binder_release_work(struct binder_proc *proc,
4361 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004362{
4363 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09004364
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004365 while (1) {
4366 w = binder_dequeue_work_head(proc, list);
4367 if (!w)
4368 return;
4369
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004370 switch (w->type) {
4371 case BINDER_WORK_TRANSACTION: {
4372 struct binder_transaction *t;
4373
4374 t = container_of(w, struct binder_transaction, work);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004375
4376 binder_cleanup_transaction(t, "process died.",
4377 BR_DEAD_REPLY);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004378 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004379 case BINDER_WORK_RETURN_ERROR: {
4380 struct binder_error *e = container_of(
4381 w, struct binder_error, work);
4382
4383 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4384 "undelivered TRANSACTION_ERROR: %u\n",
4385 e->cmd);
4386 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004387 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004388 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304389 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004390 kfree(w);
4391 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4392 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004393 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4394 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4395 struct binder_ref_death *death;
4396
4397 death = container_of(w, struct binder_ref_death, work);
4398 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004399 "undelivered death notification, %016llx\n",
4400 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004401 kfree(death);
4402 binder_stats_deleted(BINDER_STAT_DEATH);
4403 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004404 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304405 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004406 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004407 break;
4408 }
4409 }
4410
4411}
4412
Todd Kjosb4827902017-05-25 15:52:17 -07004413static struct binder_thread *binder_get_thread_ilocked(
4414 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004415{
4416 struct binder_thread *thread = NULL;
4417 struct rb_node *parent = NULL;
4418 struct rb_node **p = &proc->threads.rb_node;
4419
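	/* per-proc threads are kept in an rbtree keyed by pid */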
4420 while (*p) {
4421 parent = *p;
4422 thread = rb_entry(parent, struct binder_thread, rb_node);
4423
4424 if (current->pid < thread->pid)
4425 p = &(*p)->rb_left;
4426 else if (current->pid > thread->pid)
4427 p = &(*p)->rb_right;
4428 else
Todd Kjosb4827902017-05-25 15:52:17 -07004429 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004430 }
Todd Kjosb4827902017-05-25 15:52:17 -07004431 if (!new_thread)
4432 return NULL;
4433 thread = new_thread;
4434 binder_stats_created(BINDER_STAT_THREAD);
4435 thread->proc = proc;
4436 thread->pid = current->pid;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004437 get_task_struct(current);
4438 thread->task = current;
Todd Kjosb4827902017-05-25 15:52:17 -07004439 atomic_set(&thread->tmp_ref, 0);
4440 init_waitqueue_head(&thread->wait);
4441 INIT_LIST_HEAD(&thread->todo);
4442 rb_link_node(&thread->rb_node, parent, p);
4443 rb_insert_color(&thread->rb_node, &proc->threads);
4444 thread->looper_need_return = true;
4445 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4446 thread->return_error.cmd = BR_OK;
4447 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4448 thread->reply_error.cmd = BR_OK;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004449 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004450 return thread;
4451}
4452
4453static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4454{
4455 struct binder_thread *thread;
4456 struct binder_thread *new_thread;
4457
4458 binder_inner_proc_lock(proc);
4459 thread = binder_get_thread_ilocked(proc, NULL);
4460 binder_inner_proc_unlock(proc);
4461 if (!thread) {
4462 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4463 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004464 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07004465 binder_inner_proc_lock(proc);
4466 thread = binder_get_thread_ilocked(proc, new_thread);
4467 binder_inner_proc_unlock(proc);
4468 if (thread != new_thread)
4469 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004470 }
4471 return thread;
4472}
4473
Todd Kjos2f993e22017-05-12 14:42:55 -07004474static void binder_free_proc(struct binder_proc *proc)
4475{
4476 BUG_ON(!list_empty(&proc->todo));
4477 BUG_ON(!list_empty(&proc->delivered_death));
4478 binder_alloc_deferred_release(&proc->alloc);
4479 put_task_struct(proc->tsk);
4480 binder_stats_deleted(BINDER_STAT_PROC);
4481 kfree(proc);
4482}
4483
4484static void binder_free_thread(struct binder_thread *thread)
4485{
4486 BUG_ON(!list_empty(&thread->todo));
4487 binder_stats_deleted(BINDER_STAT_THREAD);
4488 binder_proc_dec_tmpref(thread->proc);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004489 put_task_struct(thread->task);
Todd Kjos2f993e22017-05-12 14:42:55 -07004490 kfree(thread);
4491}
4492
4493static int binder_thread_release(struct binder_proc *proc,
4494 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004495{
4496 struct binder_transaction *t;
4497 struct binder_transaction *send_reply = NULL;
4498 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07004499 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004500
Todd Kjosb4827902017-05-25 15:52:17 -07004501 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004502 /*
4503 * take a ref on the proc so it survives
4504 * after we remove this thread from proc->threads.
4505 * The corresponding dec is when we actually
4506 * free the thread in binder_free_thread()
4507 */
4508 proc->tmp_ref++;
4509 /*
4510 * take a ref on this thread to ensure it
4511 * survives while we are releasing it
4512 */
4513 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004514 rb_erase(&thread->rb_node, &proc->threads);
4515 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07004516 if (t) {
4517 spin_lock(&t->lock);
4518 if (t->to_thread == thread)
4519 send_reply = t;
4520 }
4521 thread->is_dead = true;
4522
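	/*
	 * Walk the transaction stack and detach this thread from every
	 * transaction that targets it or originates from it, holding each
	 * transaction's lock while the links are cleared.
	 */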
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004523 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07004524 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004525 active_transactions++;
4526 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304527 "release %d:%d transaction %d %s, still active\n",
4528 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004529 t->debug_id,
4530 (t->to_thread == thread) ? "in" : "out");
4531
4532 if (t->to_thread == thread) {
4533 t->to_proc = NULL;
4534 t->to_thread = NULL;
4535 if (t->buffer) {
4536 t->buffer->transaction = NULL;
4537 t->buffer = NULL;
4538 }
4539 t = t->to_parent;
4540 } else if (t->from == thread) {
4541 t->from = NULL;
4542 t = t->from_parent;
4543 } else
4544 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07004545 spin_unlock(&last_t->lock);
4546 if (t)
4547 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004548 }
Martijn Coenen550c01d2018-01-05 11:27:07 +01004549
4550 /*
4551 * If this thread used poll, make sure we remove the waitqueue
4552 * from any epoll data structures holding it with POLLFREE.
4553 * waitqueue_active() is safe to use here because we're holding
4554 * the inner lock.
4555 */
4556 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4557 waitqueue_active(&thread->wait)) {
4558 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4559 }
4560
Todd Kjosb4827902017-05-25 15:52:17 -07004561 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004562
Martijn Coenen72766d72018-02-16 09:47:15 +01004563 /*
4564 * This is needed to avoid races between wake_up_poll() above and
4565 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4566 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4567 * lock, so we can be sure it's done after calling synchronize_rcu().
4568 */
4569 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4570 synchronize_rcu();
4571
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004572 if (send_reply)
4573 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004574 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004575 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004576 return active_transactions;
4577}
4578
4579static unsigned int binder_poll(struct file *filp,
4580 struct poll_table_struct *wait)
4581{
4582 struct binder_proc *proc = filp->private_data;
4583 struct binder_thread *thread = NULL;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004584 bool wait_for_proc_work;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004585
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004586 thread = binder_get_thread(proc);
Greg Kroah-Hartman6e463bb2018-02-28 17:17:14 +01004587 if (!thread)
Eric Biggers4be5a282018-01-30 23:11:24 -08004588 return POLLERR;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004589
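	/*
	 * Mark the thread as a poller so binder_thread_release() knows to
	 * wake it with POLLFREE, then check for work without consuming it.
	 */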
Martijn Coenen995a36e2017-06-02 13:36:52 -07004590 binder_inner_proc_lock(thread->proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004591 thread->looper |= BINDER_LOOPER_STATE_POLL;
4592 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4593
Martijn Coenen995a36e2017-06-02 13:36:52 -07004594 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004595
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004596 poll_wait(filp, &thread->wait, wait);
4597
Martijn Coenen47810932017-08-10 12:32:00 +02004598 if (binder_has_work(thread, wait_for_proc_work))
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004599 return POLLIN;
4600
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004601 return 0;
4602}
4603
Tair Rzayev78260ac2014-06-03 22:27:21 +03004604static int binder_ioctl_write_read(struct file *filp,
4605 unsigned int cmd, unsigned long arg,
4606 struct binder_thread *thread)
4607{
4608 int ret = 0;
4609 struct binder_proc *proc = filp->private_data;
4610 unsigned int size = _IOC_SIZE(cmd);
4611 void __user *ubuf = (void __user *)arg;
4612 struct binder_write_read bwr;
4613
4614 if (size != sizeof(struct binder_write_read)) {
4615 ret = -EINVAL;
4616 goto out;
4617 }
4618 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4619 ret = -EFAULT;
4620 goto out;
4621 }
4622 binder_debug(BINDER_DEBUG_READ_WRITE,
4623 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4624 proc->pid, thread->pid,
4625 (u64)bwr.write_size, (u64)bwr.write_buffer,
4626 (u64)bwr.read_size, (u64)bwr.read_buffer);
4627
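	/*
	 * Process the write buffer first, then fill the read buffer; on
	 * error the consumed counts are copied back so user space can see
	 * how much was processed.
	 */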
4628 if (bwr.write_size > 0) {
4629 ret = binder_thread_write(proc, thread,
4630 bwr.write_buffer,
4631 bwr.write_size,
4632 &bwr.write_consumed);
4633 trace_binder_write_done(ret);
4634 if (ret < 0) {
4635 bwr.read_consumed = 0;
4636 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4637 ret = -EFAULT;
4638 goto out;
4639 }
4640 }
4641 if (bwr.read_size > 0) {
4642 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4643 bwr.read_size,
4644 &bwr.read_consumed,
4645 filp->f_flags & O_NONBLOCK);
4646 trace_binder_read_done(ret);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004647 binder_inner_proc_lock(proc);
4648 if (!binder_worklist_empty_ilocked(&proc->todo))
Martijn Coenen053be422017-06-06 15:17:46 -07004649 binder_wakeup_proc_ilocked(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004650 binder_inner_proc_unlock(proc);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004651 if (ret < 0) {
4652 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4653 ret = -EFAULT;
4654 goto out;
4655 }
4656 }
4657 binder_debug(BINDER_DEBUG_READ_WRITE,
4658 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4659 proc->pid, thread->pid,
4660 (u64)bwr.write_consumed, (u64)bwr.write_size,
4661 (u64)bwr.read_consumed, (u64)bwr.read_size);
4662 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4663 ret = -EFAULT;
4664 goto out;
4665 }
4666out:
4667 return ret;
4668}
4669
4670static int binder_ioctl_set_ctx_mgr(struct file *filp)
4671{
4672 int ret = 0;
4673 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004674 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004675 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004676 kuid_t curr_euid = current_euid();
4677
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004678 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004679 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004680 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4681 ret = -EBUSY;
4682 goto out;
4683 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004684 ret = security_binder_set_context_mgr(proc->tsk);
4685 if (ret < 0)
4686 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004687 if (uid_valid(context->binder_context_mgr_uid)) {
4688 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004689 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4690 from_kuid(&init_user_ns, curr_euid),
4691 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004692 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004693 ret = -EPERM;
4694 goto out;
4695 }
4696 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004697 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004698 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004699 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004700 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004701 ret = -ENOMEM;
4702 goto out;
4703 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004704 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004705 new_node->local_weak_refs++;
4706 new_node->local_strong_refs++;
4707 new_node->has_strong_ref = 1;
4708 new_node->has_weak_ref = 1;
4709 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004710 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004711 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004712out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004713 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004714 return ret;
4715}
4716
Colin Cross833babb32017-06-20 13:54:44 -07004717static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4718 struct binder_node_debug_info *info)
{
4719 struct rb_node *n;
4720 binder_uintptr_t ptr = info->ptr;
4721
4722 memset(info, 0, sizeof(*info));
4723
4724 binder_inner_proc_lock(proc);
4725 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4726 struct binder_node *node = rb_entry(n, struct binder_node,
4727 rb_node);
4728 if (node->ptr > ptr) {
4729 info->ptr = node->ptr;
4730 info->cookie = node->cookie;
4731 info->has_strong_ref = node->has_strong_ref;
4732 info->has_weak_ref = node->has_weak_ref;
4733 break;
4734 }
4735 }
4736 binder_inner_proc_unlock(proc);
4737
4738 return 0;
4739}
4740
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004741static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4742{
4743 int ret;
4744 struct binder_proc *proc = filp->private_data;
4745 struct binder_thread *thread;
4746 unsigned int size = _IOC_SIZE(cmd);
4747 void __user *ubuf = (void __user *)arg;
4748
Tair Rzayev78260ac2014-06-03 22:27:21 +03004749 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4750 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004751
Sherry Yang435416b2017-06-22 14:37:45 -07004752 binder_selftest_alloc(&proc->alloc);
4753
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004754 trace_binder_ioctl(cmd, arg);
4755
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004756 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4757 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004758 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004759
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004760 thread = binder_get_thread(proc);
4761 if (thread == NULL) {
4762 ret = -ENOMEM;
4763 goto err;
4764 }
4765
4766 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004767 case BINDER_WRITE_READ:
4768 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4769 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004770 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004771 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004772 case BINDER_SET_MAX_THREADS: {
4773 int max_threads;
4774
4775 if (copy_from_user(&max_threads, ubuf,
4776 sizeof(max_threads))) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004777 ret = -EINVAL;
4778 goto err;
4779 }
Todd Kjosd600e902017-05-25 17:35:02 -07004780 binder_inner_proc_lock(proc);
4781 proc->max_threads = max_threads;
4782 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004783 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004784 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004785 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004786 ret = binder_ioctl_set_ctx_mgr(filp);
4787 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004788 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004789 break;
4790 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304791 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004792 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004793 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004794 thread = NULL;
4795 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004796 case BINDER_VERSION: {
4797 struct binder_version __user *ver = ubuf;
4798
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004799 if (size != sizeof(struct binder_version)) {
4800 ret = -EINVAL;
4801 goto err;
4802 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004803 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4804 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004805 ret = -EINVAL;
4806 goto err;
4807 }
4808 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004809 }
Colin Cross833babb32017-06-20 13:54:44 -07004810 case BINDER_GET_NODE_DEBUG_INFO: {
4811 struct binder_node_debug_info info;
4812
4813 if (copy_from_user(&info, ubuf, sizeof(info))) {
4814 ret = -EFAULT;
4815 goto err;
4816 }
4817
4818 ret = binder_ioctl_get_node_debug_info(proc, &info);
4819 if (ret < 0)
4820 goto err;
4821
4822 if (copy_to_user(ubuf, &info, sizeof(info))) {
4823 ret = -EFAULT;
4824 goto err;
4825 }
4826 break;
4827 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004828 default:
4829 ret = -EINVAL;
4830 goto err;
4831 }
4832 ret = 0;
4833err:
4834 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004835 thread->looper_need_return = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004836 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4837 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304838 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004839err_unlocked:
4840 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004841 return ret;
4842}
4843
4844static void binder_vma_open(struct vm_area_struct *vma)
4845{
4846 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004847
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004848 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304849 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004850 proc->pid, vma->vm_start, vma->vm_end,
4851 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4852 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004853}
4854
4855static void binder_vma_close(struct vm_area_struct *vma)
4856{
4857 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004858
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004859 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304860 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004861 proc->pid, vma->vm_start, vma->vm_end,
4862 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4863 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004864 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004865}
4866
Vinayak Menonddac7d52014-06-02 18:17:59 +05304867static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4868{
4869 return VM_FAULT_SIGBUS;
4870}
4871
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004872static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004873 .open = binder_vma_open,
4874 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304875 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004876};
4877
Todd Kjosd325d372016-10-10 10:40:53 -07004878static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4879{
4880 int ret;
4881 struct binder_proc *proc = filp->private_data;
4882 const char *failure_string;
4883
4884 if (proc->tsk != current->group_leader)
4885 return -EINVAL;
4886
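	/* cap the binder mapping at 4MB */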
4887 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4888 vma->vm_end = vma->vm_start + SZ_4M;
4889
4890 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4891 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4892 __func__, proc->pid, vma->vm_start, vma->vm_end,
4893 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4894 (unsigned long)pgprot_val(vma->vm_page_prot));
4895
4896 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4897 ret = -EPERM;
4898 failure_string = "bad vm_flags";
4899 goto err_bad_arg;
4900 }
4901 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4902 vma->vm_ops = &binder_vm_ops;
4903 vma->vm_private_data = proc;
4904
4905 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
Todd Kjosf09daf12017-11-10 15:30:27 -08004906
4907 return ret;
Todd Kjosd325d372016-10-10 10:40:53 -07004908
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004909err_bad_arg:
Elad Wexler6b646402017-12-29 11:03:37 +02004910 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004911 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4912 return ret;
4913}
4914
4915static int binder_open(struct inode *nodp, struct file *filp)
4916{
4917 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004918 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004919
Elad Wexler6b646402017-12-29 11:03:37 +02004920 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004921 current->group_leader->pid, current->pid);
4922
4923 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4924 if (proc == NULL)
4925 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004926 spin_lock_init(&proc->inner_lock);
4927 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004928 get_task_struct(current->group_leader);
4929 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004930 INIT_LIST_HEAD(&proc->todo);
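	/*
	 * Inherit the caller's scheduling policy and priority as the proc
	 * default when the policy is one binder supports; otherwise fall
	 * back to SCHED_NORMAL at nice 0.
	 */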
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004931 if (binder_supported_policy(current->policy)) {
4932 proc->default_priority.sched_policy = current->policy;
4933 proc->default_priority.prio = current->normal_prio;
4934 } else {
4935 proc->default_priority.sched_policy = SCHED_NORMAL;
4936 proc->default_priority.prio = NICE_TO_PRIO(0);
4937 }
4938
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004939 binder_dev = container_of(filp->private_data, struct binder_device,
4940 miscdev);
4941 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07004942 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004943
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004944 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004945 proc->pid = current->group_leader->pid;
4946 INIT_LIST_HEAD(&proc->delivered_death);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004947 INIT_LIST_HEAD(&proc->waiting_threads);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004948 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004949
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004950 mutex_lock(&binder_procs_lock);
4951 hlist_add_head(&proc->proc_node, &binder_procs);
4952 mutex_unlock(&binder_procs_lock);
4953
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004954 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004955 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09004956
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004957 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004958 /*
4959 * proc debug entries are shared between contexts, so
4960 * this will fail if the process tries to open the driver
4961 * again with a different context. The printing code will
4962 * anyway print all contexts that a given PID has, so this
4963 * is not a problem.
4964 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

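/*
 * Deferred flush work: mark every thread so it returns to userspace
 * (looper_need_return) and wake any thread currently blocked waiting
 * for work, counting how many were woken.
 */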
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

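/*
 * Release one node during process teardown. The node is either freed
 * outright (no references left) or moved onto binder_dead_nodes; in the
 * latter case a BINDER_WORK_DEAD_BINDER item is queued for every ref
 * that registered a death notification. Returns the updated
 * incoming-reference count.
 */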
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

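/*
 * Full teardown of a binder_proc, run from the deferred workqueue. The
 * order matters: threads are released first, then nodes (which may fan
 * out death notifications), then outgoing refs, and finally any work
 * still queued on proc->todo and proc->delivered_death.
 */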
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

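/*
 * Workqueue handler: drain binder_deferred_list one proc at a time,
 * snapshotting and clearing that proc's deferred-work bits under
 * binder_deferred_lock before acting on them without the lock held.
 */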
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

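/*
 * The print helpers below back the debugfs files registered in
 * binder_init() (state, stats, transactions, transaction_log) as well
 * as the per-pid proc entries created in binder_open().
 */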
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

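/*
 * The string tables above are indexed by protocol command value; the
 * BUILD_BUG_ON()s below keep them in sync with the counter arrays in
 * struct binder_stats.
 */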
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

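/*
 * Dump global state: dead nodes first, then every proc. Each node is
 * pinned with a temporary reference so binder_dead_nodes_lock can be
 * dropped while printing; the previous node is only released once the
 * lock has been reacquired, hence the trailing put of last_node.
 */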
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

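/*
 * The transaction log is a fixed-size ring indexed by an atomic cursor.
 * A worked example of the replay logic below, assuming a 32-entry ring:
 * after 40 entries have been logged, log->full is set, count is clamped
 * to 32 and printing starts at cur = 40 % 32 = 8, i.e. at the oldest
 * surviving entry.
 */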
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

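/*
 * Note that there is no .read or .write here: all transaction traffic
 * goes through the BINDER_WRITE_READ ioctl. A userspace sketch
 * (illustrative only):
 *
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmds),
 *		.write_buffer = (binder_uintptr_t)cmds,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */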
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

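/*
 * The BINDER_DEBUG_ENTRY() invocations above generate the
 * binder_*_open() wrappers and binder_*_fops instances used by
 * binder_init() below (the macro is defined earlier in this file).
 *
 * init_binder_device() registers one misc character device per
 * configured name; each device carries its own binder_context, so
 * e.g. /dev/binder and /dev/vndbinder have independent context
 * managers.
 */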
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

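/*
 * Module init: set up the allocator shrinker, the debugfs tree, and one
 * device per name in the "devices" module parameter. Assuming the usual
 * CONFIG_ANDROID_BINDER_DEVICES default, that parameter is
 * "binder,hwbinder,vndbinder"; it can be overridden on the kernel
 * command line, e.g. binder.devices=binder.
 */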
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");