/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

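/*
 * Illustrative sketch (not part of the driver): when a path needs both
 * the outer and inner lock of the same proc, the only legal nesting
 * follows the 1) -> 3) order above, e.g.:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...				// touch refs and todo lists
 *	binder_inner_proc_unlock(proc);
 *	binder_proc_unlock(proc);
 *
 * Taking them in the reverse order would invert the hierarchy and
 * risk deadlock against code that nests correctly.
 */
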
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

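/*
 * Illustrative note (not driver code): entry[] above is a 32-slot ring
 * indexed by cur % ARRAY_SIZE(log->entry). atomic_inc_return() yields 1
 * on the first add, so the ring is walked entry[1] .. entry[31], then
 * entry[0]; once the counter reaches 32 the log is marked full and the
 * oldest entries are silently overwritten.
 */
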
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:            binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc:    node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node:    node for lookup by @node in proc's rb_tree
 * @node_entry:      list entry for node->refs list in target node
 *                   (protected by @node->lock)
 * @proc:            binder_proc containing ref
 * @node:            binder_node of target node. When cleaning up a
 *                   ref for deletion in binder_cleanup_ref, a non-NULL
 *                   @node indicates the node must be freed
 * @death:           pointer to death notification (ref_death) if requested
 *                   (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio:         [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};

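/*
 * Worked examples of the prio encoding (illustrative only): with the
 * kernel's NICE_TO_PRIO()/PRIO_TO_NICE() macros, nice 0 under
 * SCHED_NORMAL maps to prio 120 and nice 19 to prio 139, while a
 * SCHED_FIFO task at userspace rtprio 1 maps to
 * MAX_USER_RT_PRIO - 1 - 1 = 98. to_userspace_prio() and
 * to_kernel_prio() below implement exactly these conversions.
 */
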
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notification
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:         binder_proc associated with list
 * @list:         list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return !binder_worklist_empty_ilocked(&thread->todo) ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	BUG_ON(!spin_is_locked(&proc->inner_lock));
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	BUG_ON(!spin_is_locked(&proc->inner_lock));

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}

static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			     task->pid, desired.prio,
			     to_kernel_prio(policy, priority));

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	/*
	 * Test the transaction's policy here; desired_prio is not yet
	 * initialized at this point.
	 */
	if (!inherit_rt && is_rt_policy(t->priority.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	} else {
		desired_prio.prio = t->priority.prio;
		desired_prio.sched_policy = t->priority.sched_policy;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}

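/*
 * Worked example for binder_transaction_priority() above (illustrative
 * numbers only): if a transaction arrives at SCHED_NORMAL prio 130
 * (nice 10) and the target node advertises a minimum of SCHED_NORMAL
 * prio 120 (nice 0), then node_prio.prio (120) < t->priority.prio (130),
 * so the target thread is boosted to the node's floor of prio 120.
 * Lower prio values always mean higher scheduling priority.
 */
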
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	BUG_ON(!spin_is_locked(&proc->inner_lock));

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	BUG_ON(!spin_is_locked(&proc->inner_lock));
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	BUG_ON(!spin_is_locked(&node->lock));
	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

Todd Kjoscbcbbd62017-06-08 13:45:59 -07001400static bool binder_dec_node_nilocked(struct binder_node *node,
1401 int strong, int internal)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001402{
1403 struct binder_proc *proc = node->proc;
1404
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001405 BUG_ON(!spin_is_locked(&node->lock));
Todd Kjose7f23ed2017-03-21 13:06:01 -07001406 if (proc)
1407 BUG_ON(!spin_is_locked(&proc->inner_lock));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001408 if (strong) {
1409 if (internal)
1410 node->internal_strong_refs--;
1411 else
1412 node->local_strong_refs--;
1413 if (node->local_strong_refs || node->internal_strong_refs)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001414 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001415 } else {
1416 if (!internal)
1417 node->local_weak_refs--;
Todd Kjosf22abc72017-05-09 11:08:05 -07001418 if (node->local_weak_refs || node->tmp_refs ||
1419 !hlist_empty(&node->refs))
Todd Kjose7f23ed2017-03-21 13:06:01 -07001420 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001421 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001422
1423 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001424 if (list_empty(&node->work.entry)) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001425 binder_enqueue_work_ilocked(&node->work, &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07001426 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001427 }
1428 } else {
1429 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
Todd Kjosf22abc72017-05-09 11:08:05 -07001430 !node->local_weak_refs && !node->tmp_refs) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07001431 if (proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001432 binder_dequeue_work_ilocked(&node->work);
1433 rb_erase(&node->rb_node, &proc->nodes);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001434 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301435 "refless node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001436 node->debug_id);
1437 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001438 BUG_ON(!list_empty(&node->work.entry));
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001439 spin_lock(&binder_dead_nodes_lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001440 /*
1441 * tmp_refs could have changed so
1442 * check it again
1443 */
1444 if (node->tmp_refs) {
1445 spin_unlock(&binder_dead_nodes_lock);
1446 return false;
1447 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001448 hlist_del(&node->dead_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001449 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001450 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301451 "dead node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001452 node->debug_id);
1453 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001454 return true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001455 }
1456 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001457 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001458}
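
/*
 * Illustrative summary of the checks above (a sketch, not additional
 * behavior): a node stays alive while any of these counts is nonzero:
 *
 *	strong:	internal_strong_refs, local_strong_refs
 *	weak:	local_weak_refs, tmp_refs, entries on node->refs
 *
 * Only when all of them reach zero is the node unhooked (from
 * proc->nodes for a live node, or from the dead-nodes list) and
 * true returned so the caller can binder_free_node() it.
 */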
1459
Todd Kjose7f23ed2017-03-21 13:06:01 -07001460static void binder_dec_node(struct binder_node *node, int strong, int internal)
1461{
1462 bool free_node;
1463
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001464 binder_node_inner_lock(node);
1465 free_node = binder_dec_node_nilocked(node, strong, internal);
1466 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001467 if (free_node)
1468 binder_free_node(node);
1469}
1470
1471static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
Todd Kjosf22abc72017-05-09 11:08:05 -07001472{
1473 /*
1474 * No call to binder_inc_node() is needed since we
1475 * don't need to inform userspace of any changes to
1476 * tmp_refs
1477 */
1478 node->tmp_refs++;
1479}
1480
1481/**
Todd Kjose7f23ed2017-03-21 13:06:01 -07001482 * binder_inc_node_tmpref() - take a temporary reference on node
1483 * @node: node to reference
1484 *
1485 * Take reference on node to prevent the node from being freed
1486 * while referenced only by a local variable. The inner lock is
1487 * needed to serialize with the node work on the queue (which
1488 * isn't needed after the node is dead). If the node is dead
1489 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1490 * node->tmp_refs against dead-node-only cases where the node
1491 * lock cannot be acquired (eg traversing the dead node list to
1492 * print nodes)
1493 */
1494static void binder_inc_node_tmpref(struct binder_node *node)
1495{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001496 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001497 if (node->proc)
1498 binder_inner_proc_lock(node->proc);
1499 else
1500 spin_lock(&binder_dead_nodes_lock);
1501 binder_inc_node_tmpref_ilocked(node);
1502 if (node->proc)
1503 binder_inner_proc_unlock(node->proc);
1504 else
1505 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001506 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001507}
1508
1509/**
Todd Kjosf22abc72017-05-09 11:08:05 -07001510 * binder_dec_node_tmpref() - remove a temporary reference on node
1511 * @node: node to reference
1512 *
1513 * Release temporary reference on node taken via binder_inc_node_tmpref()
1514 */
1515static void binder_dec_node_tmpref(struct binder_node *node)
1516{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001517 bool free_node;
1518
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001519 binder_node_inner_lock(node);
1520 if (!node->proc)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001521 spin_lock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001522 node->tmp_refs--;
1523 BUG_ON(node->tmp_refs < 0);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001524 if (!node->proc)
1525 spin_unlock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001526 /*
1527 * Call binder_dec_node() to check if all refcounts are 0
1528 * and cleanup is needed. Calling with strong=0 and internal=1
1529 * causes no actual reference to be released in binder_dec_node().
1530 * If that changes, a change is needed here too.
1531 */
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001532 free_node = binder_dec_node_nilocked(node, 0, 1);
1533 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001534 if (free_node)
1535 binder_free_node(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07001536}
1537
1538static void binder_put_node(struct binder_node *node)
1539{
1540 binder_dec_node_tmpref(node);
1541}
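
/*
 * Illustrative usage (a sketch, not driver code): lookups elsewhere in
 * this file return a node with a tmp_ref already held, so the typical
 * caller pattern is:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... use the node under the appropriate locks ...
 *		binder_put_node(node);
 *	}
 */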
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001542
Todd Kjos5346bf32016-10-20 16:43:34 -07001543static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1544 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001545{
1546 struct rb_node *n = proc->refs_by_desc.rb_node;
1547 struct binder_ref *ref;
1548
1549 while (n) {
1550 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1551
Todd Kjosb0117bb2017-05-08 09:16:27 -07001552 if (desc < ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001553 n = n->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001554 } else if (desc > ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001555 n = n->rb_right;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001556 } else if (need_strong_ref && !ref->data.strong) {
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001557 binder_user_error("tried to use weak ref as strong ref\n");
1558 return NULL;
1559 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001560 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001561 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001562 }
1563 return NULL;
1564}
1565
Todd Kjosb0117bb2017-05-08 09:16:27 -07001566/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001567 * binder_get_ref_for_node_olocked() - get the ref associated with given node
Todd Kjosb0117bb2017-05-08 09:16:27 -07001568 * @proc: binder_proc that owns the ref
1569 * @node: binder_node of target
1570 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1571 *
1572 * Look up the ref for the given node and return it if it exists
1573 *
1574 * If it doesn't exist and the caller provides a newly allocated
1575 * ref, initialize the fields of the newly allocated ref and insert
1576 * into the given proc rb_trees and node refs list.
1577 *
1578 * Return: the ref for node. It is possible that another thread
1579 * allocated/initialized the ref first in which case the
1580 * returned ref would be different than the passed-in
1581 * new_ref. new_ref must be kfree'd by the caller in
1582 * this case.
1583 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001584static struct binder_ref *binder_get_ref_for_node_olocked(
1585 struct binder_proc *proc,
1586 struct binder_node *node,
1587 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001588{
Todd Kjosb0117bb2017-05-08 09:16:27 -07001589 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001590 struct rb_node **p = &proc->refs_by_node.rb_node;
1591 struct rb_node *parent = NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001592 struct binder_ref *ref;
1593 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001594
1595 while (*p) {
1596 parent = *p;
1597 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1598
1599 if (node < ref->node)
1600 p = &(*p)->rb_left;
1601 else if (node > ref->node)
1602 p = &(*p)->rb_right;
1603 else
1604 return ref;
1605 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001606 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001607 return NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001608
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001609 binder_stats_created(BINDER_STAT_REF);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001610 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001611 new_ref->proc = proc;
1612 new_ref->node = node;
1613 rb_link_node(&new_ref->rb_node_node, parent, p);
1614 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1615
Todd Kjosb0117bb2017-05-08 09:16:27 -07001616 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001617 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1618 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001619 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001620 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001621 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001622 }
1623
1624 p = &proc->refs_by_desc.rb_node;
1625 while (*p) {
1626 parent = *p;
1627 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1628
Todd Kjosb0117bb2017-05-08 09:16:27 -07001629 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001630 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001631 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001632 p = &(*p)->rb_right;
1633 else
1634 BUG();
1635 }
1636 rb_link_node(&new_ref->rb_node_desc, parent, p);
1637 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001638
1639 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001640 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001641
Todd Kjos4cbe5752017-05-01 17:21:51 -07001642 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1643 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001644 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001645 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001646 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001647 return new_ref;
1648}
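
/*
 * Worked example of the descriptor assignment above (values are
 * illustrative): the new ref gets the lowest unused descriptor, with
 * desc 0 reserved for refs to the context manager node.
 *
 *	existing descs 0, 1, 2, 4  ->  new_ref->data.desc == 3
 *	existing descs 0, 1, 2, 3  ->  new_ref->data.desc == 4
 *	ref to the context manager ->  new_ref->data.desc == 0
 */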
1649
Todd Kjos5346bf32016-10-20 16:43:34 -07001650static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001651{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001652 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001653
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001654 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301655 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001656 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301657 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001658
1659 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1660 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001661
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001662 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001663 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001664 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001665
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001666 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001667 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1668 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001669 /*
1670 * Clear ref->node unless we want the caller to free the node
1671 */
1672 if (!delete_node) {
1673 /*
1674 * The caller uses ref->node to determine
1675 * whether the node needs to be freed. Clear
1676 * it since the node is still alive.
1677 */
1678 ref->node = NULL;
1679 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001680
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001681 if (ref->death) {
1682 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301683 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001684 ref->proc->pid, ref->data.debug_id,
1685 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001686 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001687 binder_stats_deleted(BINDER_STAT_DEATH);
1688 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001689 binder_stats_deleted(BINDER_STAT_REF);
1690}
1691
Todd Kjosb0117bb2017-05-08 09:16:27 -07001692/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001693 * binder_inc_ref_olocked() - increment the ref for given handle
Todd Kjosb0117bb2017-05-08 09:16:27 -07001694 * @ref: ref to be incremented
1695 * @strong: if true, strong increment, else weak
1696 * @target_list: list to queue node work on
1697 *
Todd Kjos5346bf32016-10-20 16:43:34 -07001698 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjosb0117bb2017-05-08 09:16:27 -07001699 *
1700 * Return: 0, if successful, else errno
1701 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001702static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1703 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001704{
1705 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001706
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001707 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001708 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001709 ret = binder_inc_node(ref->node, 1, 1, target_list);
1710 if (ret)
1711 return ret;
1712 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001713 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001714 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001715 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001716 ret = binder_inc_node(ref->node, 0, 1, target_list);
1717 if (ret)
1718 return ret;
1719 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001720 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001721 }
1722 return 0;
1723}
1724
Todd Kjosb0117bb2017-05-08 09:16:27 -07001725/**
 1726 * binder_dec_ref_olocked() - dec the ref for given handle
1727 * @ref: ref to be decremented
1728 * @strong: if true, strong decrement, else weak
1729 *
 1730 * Decrement the ref. @ref->proc->outer_lock must be held on entry
1731 *
Todd Kjosb0117bb2017-05-08 09:16:27 -07001732 * Return: true if ref is cleaned up and ready to be freed
1733 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001734static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001735{
1736 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001737 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301738 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001739 ref->proc->pid, ref->data.debug_id,
1740 ref->data.desc, ref->data.strong,
1741 ref->data.weak);
1742 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001743 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001744 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001745 if (ref->data.strong == 0)
1746 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001747 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001748 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301749 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001750 ref->proc->pid, ref->data.debug_id,
1751 ref->data.desc, ref->data.strong,
1752 ref->data.weak);
1753 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001754 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001755 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001756 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001757 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001758 binder_cleanup_ref_olocked(ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001759 return true;
1760 }
1761 return false;
1762}
1763
1764/**
1765 * binder_get_node_from_ref() - get the node from the given proc/desc
1766 * @proc: proc containing the ref
1767 * @desc: the handle associated with the ref
1768 * @need_strong_ref: if true, only return node if ref is strong
1769 * @rdata: the id/refcount data for the ref
1770 *
1771 * Given a proc and ref handle, return the associated binder_node
1772 *
 1773 * Return: a binder_node with a tmp_ref held, or NULL if not found or only weak when strong was required
1774 */
1775static struct binder_node *binder_get_node_from_ref(
1776 struct binder_proc *proc,
1777 u32 desc, bool need_strong_ref,
1778 struct binder_ref_data *rdata)
1779{
1780 struct binder_node *node;
1781 struct binder_ref *ref;
1782
Todd Kjos5346bf32016-10-20 16:43:34 -07001783 binder_proc_lock(proc);
1784 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001785 if (!ref)
1786 goto err_no_ref;
1787 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001788 /*
1789 * Take an implicit reference on the node to ensure
1790 * it stays alive until the call to binder_put_node()
1791 */
1792 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001793 if (rdata)
1794 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001795 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001796
1797 return node;
1798
1799err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001800 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001801 return NULL;
1802}
1803
1804/**
1805 * binder_free_ref() - free the binder_ref
1806 * @ref: ref to free
1807 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001808 * Free the binder_ref. Free the binder_node indicated by ref->node
1809 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001810 */
1811static void binder_free_ref(struct binder_ref *ref)
1812{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001813 if (ref->node)
1814 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001815 kfree(ref->death);
1816 kfree(ref);
1817}
1818
1819/**
1820 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1821 * @proc: proc containing the ref
1822 * @desc: the handle associated with the ref
1823 * @increment: true=inc reference, false=dec reference
1824 * @strong: true=strong reference, false=weak reference
1825 * @rdata: the id/refcount data for the ref
1826 *
1827 * Given a proc and ref handle, increment or decrement the ref
1828 * according to "increment" arg.
1829 *
1830 * Return: 0 if successful, else errno
1831 */
1832static int binder_update_ref_for_handle(struct binder_proc *proc,
1833 uint32_t desc, bool increment, bool strong,
1834 struct binder_ref_data *rdata)
1835{
1836 int ret = 0;
1837 struct binder_ref *ref;
1838 bool delete_ref = false;
1839
Todd Kjos5346bf32016-10-20 16:43:34 -07001840 binder_proc_lock(proc);
1841 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001842 if (!ref) {
1843 ret = -EINVAL;
1844 goto err_no_ref;
1845 }
1846 if (increment)
Todd Kjos5346bf32016-10-20 16:43:34 -07001847 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001848 else
Todd Kjos5346bf32016-10-20 16:43:34 -07001849 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001850
1851 if (rdata)
1852 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001853 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001854
1855 if (delete_ref)
1856 binder_free_ref(ref);
1857 return ret;
1858
1859err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001860 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001861 return ret;
1862}
1863
1864/**
1865 * binder_dec_ref_for_handle() - dec the ref for given handle
1866 * @proc: proc containing the ref
1867 * @desc: the handle associated with the ref
1868 * @strong: true=strong reference, false=weak reference
1869 * @rdata: the id/refcount data for the ref
1870 *
1871 * Just calls binder_update_ref_for_handle() to decrement the ref.
1872 *
1873 * Return: 0 if successful, else errno
1874 */
1875static int binder_dec_ref_for_handle(struct binder_proc *proc,
1876 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1877{
1878 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1879}
1880
1881
1882/**
1883 * binder_inc_ref_for_node() - increment the ref for given proc/node
1884 * @proc: proc containing the ref
1885 * @node: target node
1886 * @strong: true=strong reference, false=weak reference
1887 * @target_list: worklist to use if node is incremented
1888 * @rdata: the id/refcount data for the ref
1889 *
1890 * Given a proc and node, increment the ref. Create the ref if it
1891 * doesn't already exist
1892 *
1893 * Return: 0 if successful, else errno
1894 */
1895static int binder_inc_ref_for_node(struct binder_proc *proc,
1896 struct binder_node *node,
1897 bool strong,
1898 struct list_head *target_list,
1899 struct binder_ref_data *rdata)
1900{
1901 struct binder_ref *ref;
1902 struct binder_ref *new_ref = NULL;
1903 int ret = 0;
1904
Todd Kjos5346bf32016-10-20 16:43:34 -07001905 binder_proc_lock(proc);
1906 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001907 if (!ref) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001908 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001909 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1910 if (!new_ref)
1911 return -ENOMEM;
Todd Kjos5346bf32016-10-20 16:43:34 -07001912 binder_proc_lock(proc);
1913 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001914 }
Todd Kjos5346bf32016-10-20 16:43:34 -07001915 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001916 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001917 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001918 if (new_ref && ref != new_ref)
1919 /*
1920 * Another thread created the ref first so
1921 * free the one we allocated
1922 */
1923 kfree(new_ref);
1924 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001925}
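
/*
 * Illustrative caller sketch (mirrors binder_translate_binder()
 * further below; is_strong stands in for the strong/weak choice):
 * handing a ref on @node to another process and publishing the
 * resulting descriptor in the flattened object.
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	ret = binder_inc_ref_for_node(target_proc, node, is_strong,
 *				      &thread->todo, &rdata);
 *	if (ret)
 *		return ret;
 *	fp->handle = rdata.desc;	/* desc valid in target_proc */
 */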
1926
Martijn Coenen995a36e2017-06-02 13:36:52 -07001927static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1928 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001929{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001930 BUG_ON(!target_thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07001931 BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
Todd Kjos21ef40a2017-03-30 18:02:13 -07001932 BUG_ON(target_thread->transaction_stack != t);
1933 BUG_ON(target_thread->transaction_stack->from != target_thread);
1934 target_thread->transaction_stack =
1935 target_thread->transaction_stack->from_parent;
1936 t->from = NULL;
1937}
1938
Todd Kjos2f993e22017-05-12 14:42:55 -07001939/**
1940 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1941 * @thread: thread to decrement
1942 *
1943 * A thread needs to be kept alive while being used to create or
1944 * handle a transaction. binder_get_txn_from() is used to safely
1945 * extract t->from from a binder_transaction and keep the thread
1946 * indicated by t->from from being freed. When done with that
1947 * binder_thread, this function is called to decrement the
1948 * tmp_ref and free if appropriate (thread has been released
1949 * and no transaction being processed by the driver)
1950 */
1951static void binder_thread_dec_tmpref(struct binder_thread *thread)
1952{
1953 /*
1954 * atomic is used to protect the counter value while
1955 * it cannot reach zero or thread->is_dead is false
Todd Kjos2f993e22017-05-12 14:42:55 -07001956 */
Todd Kjosb4827902017-05-25 15:52:17 -07001957 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001958 atomic_dec(&thread->tmp_ref);
1959 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07001960 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001961 binder_free_thread(thread);
1962 return;
1963 }
Todd Kjosb4827902017-05-25 15:52:17 -07001964 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001965}
1966
1967/**
1968 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1969 * @proc: proc to decrement
1970 *
1971 * A binder_proc needs to be kept alive while being used to create or
1972 * handle a transaction. proc->tmp_ref is incremented when
1973 * creating a new transaction or the binder_proc is currently in-use
1974 * by threads that are being released. When done with the binder_proc,
1975 * this function is called to decrement the counter and free the
1976 * proc if appropriate (proc has been released, all threads have
 1977 * been released and not currently in-use to process a transaction).
1978 */
1979static void binder_proc_dec_tmpref(struct binder_proc *proc)
1980{
Todd Kjosb4827902017-05-25 15:52:17 -07001981 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001982 proc->tmp_ref--;
1983 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1984 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07001985 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001986 binder_free_proc(proc);
1987 return;
1988 }
Todd Kjosb4827902017-05-25 15:52:17 -07001989 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001990}
1991
1992/**
1993 * binder_get_txn_from() - safely extract the "from" thread in transaction
1994 * @t: binder transaction for t->from
1995 *
1996 * Atomically return the "from" thread and increment the tmp_ref
1997 * count for the thread to ensure it stays alive until
1998 * binder_thread_dec_tmpref() is called.
1999 *
2000 * Return: the value of t->from
2001 */
2002static struct binder_thread *binder_get_txn_from(
2003 struct binder_transaction *t)
2004{
2005 struct binder_thread *from;
2006
2007 spin_lock(&t->lock);
2008 from = t->from;
2009 if (from)
2010 atomic_inc(&from->tmp_ref);
2011 spin_unlock(&t->lock);
2012 return from;
2013}
2014
Martijn Coenen995a36e2017-06-02 13:36:52 -07002015/**
2016 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2017 * @t: binder transaction for t->from
2018 *
2019 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2020 * to guarantee that the thread cannot be released while operating on it.
2021 * The caller must call binder_inner_proc_unlock() to release the inner lock
 2022 * as well as call binder_thread_dec_tmpref() to release the reference.
2023 *
2024 * Return: the value of t->from
2025 */
2026static struct binder_thread *binder_get_txn_from_and_acq_inner(
2027 struct binder_transaction *t)
2028{
2029 struct binder_thread *from;
2030
2031 from = binder_get_txn_from(t);
2032 if (!from)
2033 return NULL;
2034 binder_inner_proc_lock(from->proc);
2035 if (t->from) {
2036 BUG_ON(from != t->from);
2037 return from;
2038 }
2039 binder_inner_proc_unlock(from->proc);
2040 binder_thread_dec_tmpref(from);
2041 return NULL;
2042}
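
/*
 * Typical caller pattern (a sketch; binder_send_failed_reply() below
 * does exactly this): pin t->from and hold its proc->inner_lock while
 * touching the transaction stack, then release both in reverse order.
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... unwind target_thread->transaction_stack ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */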
2043
Todd Kjos21ef40a2017-03-30 18:02:13 -07002044static void binder_free_transaction(struct binder_transaction *t)
2045{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002046 if (t->buffer)
2047 t->buffer->transaction = NULL;
2048 kfree(t);
2049 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2050}
2051
2052static void binder_send_failed_reply(struct binder_transaction *t,
2053 uint32_t error_code)
2054{
2055 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002056 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09002057
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002058 BUG_ON(t->flags & TF_ONE_WAY);
2059 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002060 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002061 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002062 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2063 "send failed reply for transaction %d to %d:%d\n",
2064 t->debug_id,
2065 target_thread->proc->pid,
2066 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002067
Martijn Coenen995a36e2017-06-02 13:36:52 -07002068 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07002069 if (target_thread->reply_error.cmd == BR_OK) {
2070 target_thread->reply_error.cmd = error_code;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002071 binder_enqueue_work_ilocked(
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002072 &target_thread->reply_error.work,
Todd Kjos858b8da2017-04-21 17:35:12 -07002073 &target_thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002074 wake_up_interruptible(&target_thread->wait);
2075 } else {
Todd Kjos858b8da2017-04-21 17:35:12 -07002076 WARN(1, "Unexpected reply error: %u\n",
2077 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002078 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002079 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002080 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07002081 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002082 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002083 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002084 next = t->from_parent;
2085
2086 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2087 "send failed reply for transaction %d, target dead\n",
2088 t->debug_id);
2089
Todd Kjos21ef40a2017-03-30 18:02:13 -07002090 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002091 if (next == NULL) {
2092 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2093 "reply failed, no target thread at root\n");
2094 return;
2095 }
2096 t = next;
2097 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2098 "reply failed, no target thread -- retry %d\n",
2099 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002100 }
2101}
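
/*
 * Sketch of the unwinding above for a nested chain where T3 replies to
 * T2 and T2 replies to T1 (transaction names are illustrative only):
 *
 *	T3: from thread dead  -> free T3, follow T3->from_parent
 *	T2: from thread dead  -> free T2, follow T2->from_parent
 *	T1: from thread alive -> pop T1 and queue reply_error on that
 *	    thread's todo list, then free T1
 */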
2102
Martijn Coenen00c80372016-07-13 12:06:49 +02002103/**
2104 * binder_validate_object() - checks for a valid metadata object in a buffer.
2105 * @buffer: binder_buffer that we're parsing.
2106 * @offset: offset in the buffer at which to validate an object.
2107 *
2108 * Return: If there's a valid metadata object at @offset in @buffer, the
2109 * size of that object. Otherwise, it returns zero.
2110 */
2111static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2112{
2113 /* Check if we can read a header first */
2114 struct binder_object_header *hdr;
2115 size_t object_size = 0;
2116
2117 if (offset > buffer->data_size - sizeof(*hdr) ||
2118 buffer->data_size < sizeof(*hdr) ||
2119 !IS_ALIGNED(offset, sizeof(u32)))
2120 return 0;
2121
2122 /* Ok, now see if we can read a complete object. */
2123 hdr = (struct binder_object_header *)(buffer->data + offset);
2124 switch (hdr->type) {
2125 case BINDER_TYPE_BINDER:
2126 case BINDER_TYPE_WEAK_BINDER:
2127 case BINDER_TYPE_HANDLE:
2128 case BINDER_TYPE_WEAK_HANDLE:
2129 object_size = sizeof(struct flat_binder_object);
2130 break;
2131 case BINDER_TYPE_FD:
2132 object_size = sizeof(struct binder_fd_object);
2133 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002134 case BINDER_TYPE_PTR:
2135 object_size = sizeof(struct binder_buffer_object);
2136 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002137 case BINDER_TYPE_FDA:
2138 object_size = sizeof(struct binder_fd_array_object);
2139 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02002140 default:
2141 return 0;
2142 }
2143 if (offset <= buffer->data_size - object_size &&
2144 buffer->data_size >= object_size)
2145 return object_size;
2146 else
2147 return 0;
2148}
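
/*
 * Worked example (illustrative numbers): with buffer->data_size == 128
 * and a flat_binder_object at offset 104, the checks above require
 * 104 <= 128 - sizeof(*hdr), 4-byte alignment of the offset, and
 * 104 <= 128 - sizeof(struct flat_binder_object); the same object at
 * offset 120 would fail the final size check and yield 0.
 */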
2149
Martijn Coenen5a6da532016-09-30 14:10:07 +02002150/**
2151 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2152 * @b: binder_buffer containing the object
2153 * @index: index in offset array at which the binder_buffer_object is
2154 * located
2155 * @start: points to the start of the offset array
2156 * @num_valid: the number of valid offsets in the offset array
2157 *
2158 * Return: If @index is within the valid range of the offset array
2159 * described by @start and @num_valid, and if there's a valid
2160 * binder_buffer_object at the offset found in index @index
2161 * of the offset array, that object is returned. Otherwise,
2162 * %NULL is returned.
2163 * Note that the offset found in index @index itself is not
2164 * verified; this function assumes that @num_valid elements
2165 * from @start were previously verified to have valid offsets.
2166 */
2167static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2168 binder_size_t index,
2169 binder_size_t *start,
2170 binder_size_t num_valid)
2171{
2172 struct binder_buffer_object *buffer_obj;
2173 binder_size_t *offp;
2174
2175 if (index >= num_valid)
2176 return NULL;
2177
2178 offp = start + index;
2179 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2180 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2181 return NULL;
2182
2183 return buffer_obj;
2184}
2185
2186/**
2187 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2188 * @b: transaction buffer
 2189 * @objects_start:	start of objects buffer
2190 * @buffer: binder_buffer_object in which to fix up
 2191 * @fixup_offset:	start offset in @buffer to fix up
2192 * @last_obj: last binder_buffer_object that we fixed up in
2193 * @last_min_offset: minimum fixup offset in @last_obj
2194 *
2195 * Return: %true if a fixup in buffer @buffer at offset @offset is
2196 * allowed.
2197 *
2198 * For safety reasons, we only allow fixups inside a buffer to happen
2199 * at increasing offsets; additionally, we only allow fixup on the last
2200 * buffer object that was verified, or one of its parents.
2201 *
2202 * Example of what is allowed:
2203 *
2204 * A
2205 * B (parent = A, offset = 0)
2206 * C (parent = A, offset = 16)
2207 * D (parent = C, offset = 0)
2208 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2209 *
2210 * Examples of what is not allowed:
2211 *
2212 * Decreasing offsets within the same parent:
2213 * A
2214 * C (parent = A, offset = 16)
2215 * B (parent = A, offset = 0) // decreasing offset within A
2216 *
2217 * Referring to a parent that wasn't the last object or any of its parents:
2218 * A
2219 * B (parent = A, offset = 0)
2220 * C (parent = A, offset = 0)
2221 * C (parent = A, offset = 16)
2222 * D (parent = B, offset = 0) // B is not A or any of A's parents
2223 */
2224static bool binder_validate_fixup(struct binder_buffer *b,
2225 binder_size_t *objects_start,
2226 struct binder_buffer_object *buffer,
2227 binder_size_t fixup_offset,
2228 struct binder_buffer_object *last_obj,
2229 binder_size_t last_min_offset)
2230{
2231 if (!last_obj) {
 2232		/* No buffer object verified yet, so nothing to fix up in */
2233 return false;
2234 }
2235
2236 while (last_obj != buffer) {
2237 /*
2238 * Safe to retrieve the parent of last_obj, since it
2239 * was already previously verified by the driver.
2240 */
2241 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2242 return false;
2243 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2244 last_obj = (struct binder_buffer_object *)
2245 (b->data + *(objects_start + last_obj->parent));
2246 }
2247 return (fixup_offset >= last_min_offset);
2248}
2249
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002250static void binder_transaction_buffer_release(struct binder_proc *proc,
2251 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002252 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002253{
Martijn Coenen5a6da532016-09-30 14:10:07 +02002254 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002255 int debug_id = buffer->debug_id;
2256
2257 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302258 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002259 proc->pid, buffer->debug_id,
2260 buffer->data_size, buffer->offsets_size, failed_at);
2261
2262 if (buffer->target_node)
2263 binder_dec_node(buffer->target_node, 1, 0);
2264
Martijn Coenen5a6da532016-09-30 14:10:07 +02002265 off_start = (binder_size_t *)(buffer->data +
2266 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002267 if (failed_at)
2268 off_end = failed_at;
2269 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02002270 off_end = (void *)off_start + buffer->offsets_size;
2271 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002272 struct binder_object_header *hdr;
2273 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002274
Martijn Coenen00c80372016-07-13 12:06:49 +02002275 if (object_size == 0) {
2276 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002277 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002278 continue;
2279 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002280 hdr = (struct binder_object_header *)(buffer->data + *offp);
2281 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002282 case BINDER_TYPE_BINDER:
2283 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002284 struct flat_binder_object *fp;
2285 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002286
Martijn Coenen00c80372016-07-13 12:06:49 +02002287 fp = to_flat_binder_object(hdr);
2288 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002289 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002290 pr_err("transaction release %d bad node %016llx\n",
2291 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002292 break;
2293 }
2294 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002295 " node %d u%016llx\n",
2296 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002297 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2298 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002299 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002300 } break;
2301 case BINDER_TYPE_HANDLE:
2302 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002303 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002304 struct binder_ref_data rdata;
2305 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002306
Martijn Coenen00c80372016-07-13 12:06:49 +02002307 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002308 ret = binder_dec_ref_for_handle(proc, fp->handle,
2309 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2310
2311 if (ret) {
2312 pr_err("transaction release %d bad handle %d, ret = %d\n",
2313 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002314 break;
2315 }
2316 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002317 " ref %d desc %d\n",
2318 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002319 } break;
2320
Martijn Coenen00c80372016-07-13 12:06:49 +02002321 case BINDER_TYPE_FD: {
2322 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2323
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002324 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002325 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002326 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002327 task_close_fd(proc, fp->fd);
2328 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002329 case BINDER_TYPE_PTR:
2330 /*
2331 * Nothing to do here, this will get cleaned up when the
2332 * transaction buffer gets freed
2333 */
2334 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002335 case BINDER_TYPE_FDA: {
2336 struct binder_fd_array_object *fda;
2337 struct binder_buffer_object *parent;
2338 uintptr_t parent_buffer;
2339 u32 *fd_array;
2340 size_t fd_index;
2341 binder_size_t fd_buf_size;
2342
2343 fda = to_binder_fd_array_object(hdr);
2344 parent = binder_validate_ptr(buffer, fda->parent,
2345 off_start,
2346 offp - off_start);
2347 if (!parent) {
 2348			pr_err("transaction release %d bad parent offset\n",
2349 debug_id);
2350 continue;
2351 }
2352 /*
2353 * Since the parent was already fixed up, convert it
2354 * back to kernel address space to access it
2355 */
2356 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002357 binder_alloc_get_user_buffer_offset(
2358 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002359
2360 fd_buf_size = sizeof(u32) * fda->num_fds;
2361 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2362 pr_err("transaction release %d invalid number of fds (%lld)\n",
2363 debug_id, (u64)fda->num_fds);
2364 continue;
2365 }
2366 if (fd_buf_size > parent->length ||
2367 fda->parent_offset > parent->length - fd_buf_size) {
2368 /* No space for all file descriptors here. */
2369 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2370 debug_id, (u64)fda->num_fds);
2371 continue;
2372 }
2373 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2374 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2375 task_close_fd(proc, fd_array[fd_index]);
2376 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002377 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002378 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002379 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002380 break;
2381 }
2382 }
2383}
2384
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002385static int binder_translate_binder(struct flat_binder_object *fp,
2386 struct binder_transaction *t,
2387 struct binder_thread *thread)
2388{
2389 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002390 struct binder_proc *proc = thread->proc;
2391 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002392 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002393 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002394
2395 node = binder_get_node(proc, fp->binder);
2396 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002397 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002398 if (!node)
2399 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002400 }
2401 if (fp->cookie != node->cookie) {
2402 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2403 proc->pid, thread->pid, (u64)fp->binder,
2404 node->debug_id, (u64)fp->cookie,
2405 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002406 ret = -EINVAL;
2407 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002408 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002409 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2410 ret = -EPERM;
2411 goto done;
2412 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002413
Todd Kjosb0117bb2017-05-08 09:16:27 -07002414 ret = binder_inc_ref_for_node(target_proc, node,
2415 fp->hdr.type == BINDER_TYPE_BINDER,
2416 &thread->todo, &rdata);
2417 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002418 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002419
2420 if (fp->hdr.type == BINDER_TYPE_BINDER)
2421 fp->hdr.type = BINDER_TYPE_HANDLE;
2422 else
2423 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2424 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002425 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002426 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002427
Todd Kjosb0117bb2017-05-08 09:16:27 -07002428 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002429 binder_debug(BINDER_DEBUG_TRANSACTION,
2430 " node %d u%016llx -> ref %d desc %d\n",
2431 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002432 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002433done:
2434 binder_put_node(node);
2435 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002436}
2437
2438static int binder_translate_handle(struct flat_binder_object *fp,
2439 struct binder_transaction *t,
2440 struct binder_thread *thread)
2441{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002442 struct binder_proc *proc = thread->proc;
2443 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002444 struct binder_node *node;
2445 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002446 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002447
Todd Kjosb0117bb2017-05-08 09:16:27 -07002448 node = binder_get_node_from_ref(proc, fp->handle,
2449 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2450 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002451 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2452 proc->pid, thread->pid, fp->handle);
2453 return -EINVAL;
2454 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002455 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2456 ret = -EPERM;
2457 goto done;
2458 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002459
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002460 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002461 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002462 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2463 fp->hdr.type = BINDER_TYPE_BINDER;
2464 else
2465 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002466 fp->binder = node->ptr;
2467 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002468 if (node->proc)
2469 binder_inner_proc_lock(node->proc);
2470 binder_inc_node_nilocked(node,
2471 fp->hdr.type == BINDER_TYPE_BINDER,
2472 0, NULL);
2473 if (node->proc)
2474 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002475 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002476 binder_debug(BINDER_DEBUG_TRANSACTION,
2477 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002478 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2479 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002480 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002481 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002482 int ret;
2483 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002484
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002485 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002486 ret = binder_inc_ref_for_node(target_proc, node,
2487 fp->hdr.type == BINDER_TYPE_HANDLE,
2488 NULL, &dest_rdata);
2489 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002490 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002491
2492 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002493 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002494 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002495 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2496 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002497 binder_debug(BINDER_DEBUG_TRANSACTION,
2498 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002499 src_rdata.debug_id, src_rdata.desc,
2500 dest_rdata.debug_id, dest_rdata.desc,
2501 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002502 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002503done:
2504 binder_put_node(node);
2505 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002506}
2507
2508static int binder_translate_fd(int fd,
2509 struct binder_transaction *t,
2510 struct binder_thread *thread,
2511 struct binder_transaction *in_reply_to)
2512{
2513 struct binder_proc *proc = thread->proc;
2514 struct binder_proc *target_proc = t->to_proc;
2515 int target_fd;
2516 struct file *file;
2517 int ret;
2518 bool target_allows_fd;
2519
2520 if (in_reply_to)
2521 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2522 else
2523 target_allows_fd = t->buffer->target_node->accept_fds;
2524 if (!target_allows_fd) {
2525 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2526 proc->pid, thread->pid,
2527 in_reply_to ? "reply" : "transaction",
2528 fd);
2529 ret = -EPERM;
2530 goto err_fd_not_accepted;
2531 }
2532
2533 file = fget(fd);
2534 if (!file) {
2535 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2536 proc->pid, thread->pid, fd);
2537 ret = -EBADF;
2538 goto err_fget;
2539 }
2540 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2541 if (ret < 0) {
2542 ret = -EPERM;
2543 goto err_security;
2544 }
2545
2546 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2547 if (target_fd < 0) {
2548 ret = -ENOMEM;
2549 goto err_get_unused_fd;
2550 }
2551 task_fd_install(target_proc, target_fd, file);
2552 trace_binder_transaction_fd(t, fd, target_fd);
2553 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2554 fd, target_fd);
2555
2556 return target_fd;
2557
2558err_get_unused_fd:
2559err_security:
2560 fput(file);
2561err_fget:
2562err_fd_not_accepted:
2563 return ret;
2564}
2565
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002566static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2567 struct binder_buffer_object *parent,
2568 struct binder_transaction *t,
2569 struct binder_thread *thread,
2570 struct binder_transaction *in_reply_to)
2571{
2572 binder_size_t fdi, fd_buf_size, num_installed_fds;
2573 int target_fd;
2574 uintptr_t parent_buffer;
2575 u32 *fd_array;
2576 struct binder_proc *proc = thread->proc;
2577 struct binder_proc *target_proc = t->to_proc;
2578
2579 fd_buf_size = sizeof(u32) * fda->num_fds;
2580 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2581 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2582 proc->pid, thread->pid, (u64)fda->num_fds);
2583 return -EINVAL;
2584 }
2585 if (fd_buf_size > parent->length ||
2586 fda->parent_offset > parent->length - fd_buf_size) {
2587 /* No space for all file descriptors here. */
2588 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2589 proc->pid, thread->pid, (u64)fda->num_fds);
2590 return -EINVAL;
2591 }
2592 /*
2593 * Since the parent was already fixed up, convert it
2594 * back to the kernel address space to access it
2595 */
Todd Kjosd325d372016-10-10 10:40:53 -07002596 parent_buffer = parent->buffer -
2597 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002598 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2599 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2600 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2601 proc->pid, thread->pid);
2602 return -EINVAL;
2603 }
2604 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2605 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2606 in_reply_to);
2607 if (target_fd < 0)
2608 goto err_translate_fd_failed;
2609 fd_array[fdi] = target_fd;
2610 }
2611 return 0;
2612
2613err_translate_fd_failed:
2614 /*
2615 * Failed to allocate fd or security error, free fds
2616 * installed so far.
2617 */
2618 num_installed_fds = fdi;
2619 for (fdi = 0; fdi < num_installed_fds; fdi++)
2620 task_close_fd(target_proc, fd_array[fdi]);
2621 return target_fd;
2622}
2623
Martijn Coenen5a6da532016-09-30 14:10:07 +02002624static int binder_fixup_parent(struct binder_transaction *t,
2625 struct binder_thread *thread,
2626 struct binder_buffer_object *bp,
2627 binder_size_t *off_start,
2628 binder_size_t num_valid,
2629 struct binder_buffer_object *last_fixup_obj,
2630 binder_size_t last_fixup_min_off)
2631{
2632 struct binder_buffer_object *parent;
2633 u8 *parent_buffer;
2634 struct binder_buffer *b = t->buffer;
2635 struct binder_proc *proc = thread->proc;
2636 struct binder_proc *target_proc = t->to_proc;
2637
2638 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2639 return 0;
2640
2641 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2642 if (!parent) {
2643 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2644 proc->pid, thread->pid);
2645 return -EINVAL;
2646 }
2647
2648 if (!binder_validate_fixup(b, off_start,
2649 parent, bp->parent_offset,
2650 last_fixup_obj,
2651 last_fixup_min_off)) {
2652 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2653 proc->pid, thread->pid);
2654 return -EINVAL;
2655 }
2656
2657 if (parent->length < sizeof(binder_uintptr_t) ||
2658 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2659 /* No space for a pointer here! */
2660 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2661 proc->pid, thread->pid);
2662 return -EINVAL;
2663 }
2664 parent_buffer = (u8 *)(parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002665 binder_alloc_get_user_buffer_offset(
2666 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002667 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2668
2669 return 0;
2670}
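
/*
 * Sketch of the final store above: bp->buffer already holds the
 * target-process address of the child buffer, and it is written into
 * the parent through the kernel-side mapping of the same memory:
 *
 *	kernel view:	parent_buffer + bp->parent_offset
 *	user view:	parent->buffer + bp->parent_offset
 *	value stored:	bp->buffer (the child's address in the target)
 */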
2671
Martijn Coenen053be422017-06-06 15:17:46 -07002672/**
2673 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2674 * @t: transaction to send
2675 * @proc: process to send the transaction to
2676 * @thread: thread in @proc to send the transaction to (may be NULL)
2677 *
2678 * This function queues a transaction to the specified process. It will try
2679 * to find a thread in the target process to handle the transaction and
2680 * wake it up. If no thread is available, the work is queued to the
2681 * proc todo list and the proc waitqueue is woken.
2682 *
2683 * If the @thread parameter is not NULL, the transaction is always queued
2684 * to the todo list of that specific thread.
2685 *
2686 * Return: true if the transaction was successfully queued
2687 * false if the target process or thread is dead
2688 */
2689static bool binder_proc_transaction(struct binder_transaction *t,
2690 struct binder_proc *proc,
2691 struct binder_thread *thread)
2692{
2693 struct list_head *target_list = NULL;
2694 struct binder_node *node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002695 struct binder_priority node_prio;
Martijn Coenen053be422017-06-06 15:17:46 -07002696 bool oneway = !!(t->flags & TF_ONE_WAY);
2697 bool wakeup = true;
2698
2699 BUG_ON(!node);
2700 binder_node_lock(node);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002701 node_prio.prio = node->min_priority;
2702 node_prio.sched_policy = node->sched_policy;
2703
Martijn Coenen053be422017-06-06 15:17:46 -07002704 if (oneway) {
2705 BUG_ON(thread);
2706 if (node->has_async_transaction) {
2707 target_list = &node->async_todo;
2708 wakeup = false;
2709 } else {
2710 node->has_async_transaction = 1;
2711 }
2712 }
2713
2714 binder_inner_proc_lock(proc);
2715
2716 if (proc->is_dead || (thread && thread->is_dead)) {
2717 binder_inner_proc_unlock(proc);
2718 binder_node_unlock(node);
2719 return false;
2720 }
2721
2722 if (!thread && !target_list)
2723 thread = binder_select_thread_ilocked(proc);
2724
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002725 if (thread) {
Martijn Coenen053be422017-06-06 15:17:46 -07002726 target_list = &thread->todo;
Martijn Coenenc46810c2017-06-23 10:13:43 -07002727 binder_transaction_priority(thread->task, t, node_prio,
2728 node->inherit_rt);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002729 } else if (!target_list) {
Martijn Coenen053be422017-06-06 15:17:46 -07002730 target_list = &proc->todo;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002731 } else {
Martijn Coenen053be422017-06-06 15:17:46 -07002732 BUG_ON(target_list != &node->async_todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002733 }
Martijn Coenen053be422017-06-06 15:17:46 -07002734
2735 binder_enqueue_work_ilocked(&t->work, target_list);
2736
2737 if (wakeup)
2738 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2739
2740 binder_inner_proc_unlock(proc);
2741 binder_node_unlock(node);
2742
2743 return true;
2744}
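/*
 * Editor's note: oneway transactions serialize per node. While one
 * async transaction is in flight, later ones sit on node->async_todo
 * without a wakeup and are only moved to a thread's todo list when
 * the previous buffer is released (see the BC_FREE_BUFFER handling
 * in binder_thread_write() below).
 */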
2745
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002746static void binder_transaction(struct binder_proc *proc,
2747 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002748 struct binder_transaction_data *tr, int reply,
2749 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002750{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002751 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002752 struct binder_transaction *t;
2753 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002754 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002755 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002756 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002757 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002758 struct binder_thread *target_thread = NULL;
2759 struct binder_node *target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002760 struct binder_transaction *in_reply_to = NULL;
2761 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002762 uint32_t return_error = 0;
2763 uint32_t return_error_param = 0;
2764 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002765 struct binder_buffer_object *last_fixup_obj = NULL;
2766 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002767 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002768 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002769
2770 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002771 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002772 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2773 e->from_proc = proc->pid;
2774 e->from_thread = thread->pid;
2775 e->target_handle = tr->target.handle;
2776 e->data_size = tr->data_size;
2777 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002778 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002779
2780 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002781 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002782 in_reply_to = thread->transaction_stack;
2783 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002784 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302785 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002786 proc->pid, thread->pid);
2787 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002788 return_error_param = -EPROTO;
2789 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002790 goto err_empty_call_stack;
2791 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002792 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002793 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302794 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002795 proc->pid, thread->pid, in_reply_to->debug_id,
2796 in_reply_to->to_proc ?
2797 in_reply_to->to_proc->pid : 0,
2798 in_reply_to->to_thread ?
2799 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002800 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002801 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002802 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002803 return_error_param = -EPROTO;
2804 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002805 in_reply_to = NULL;
2806 goto err_bad_call_stack;
2807 }
2808 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002809 binder_inner_proc_unlock(proc);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002810 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002811 if (target_thread == NULL) {
2812 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002813 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002814 goto err_dead_binder;
2815 }
2816 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302817 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002818 proc->pid, thread->pid,
2819 target_thread->transaction_stack ?
2820 target_thread->transaction_stack->debug_id : 0,
2821 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002822 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002823 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002824 return_error_param = -EPROTO;
2825 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002826 in_reply_to = NULL;
2827 target_thread = NULL;
2828 goto err_dead_binder;
2829 }
2830 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002831 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002832 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002833 } else {
2834 if (tr->target.handle) {
2835 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002836
Todd Kjosc37162d2017-05-26 11:56:29 -07002837 /*
2838 * There must already be a strong ref
2839 * on this node; take another strong
2840 * increment on it so the node stays
2841 * alive until the transaction is
2842 * done.
2843 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002844 binder_proc_lock(proc);
2845 ref = binder_get_ref_olocked(proc, tr->target.handle,
2846 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002847 if (ref) {
2848 binder_inc_node(ref->node, 1, 0, NULL);
2849 target_node = ref->node;
2850 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002851 binder_proc_unlock(proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002852 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302853 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002854 proc->pid, thread->pid);
2855 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002856 return_error_param = -EINVAL;
2857 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002858 goto err_invalid_target_handle;
2859 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002860 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002861 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002862 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002863 if (target_node == NULL) {
2864 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002865 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjose598d172017-03-22 17:19:52 -07002866 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002867 goto err_no_context_mgr_node;
2868 }
Todd Kjosc37162d2017-05-26 11:56:29 -07002869 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002870 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002871 }
2872 e->to_node = target_node->debug_id;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002873 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002874 target_proc = target_node->proc;
2875 if (target_proc == NULL) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002876 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002877 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002878 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002879 goto err_dead_binder;
2880 }
Todd Kjosb4827902017-05-25 15:52:17 -07002881 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002882 target_proc->tmp_ref++;
Todd Kjosb4827902017-05-25 15:52:17 -07002883 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002884 binder_node_unlock(target_node);
Stephen Smalley79af7302015-01-21 10:54:10 -05002885 if (security_binder_transaction(proc->tsk,
2886 target_proc->tsk) < 0) {
2887 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002888 return_error_param = -EPERM;
2889 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05002890 goto err_invalid_target_handle;
2891 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002892 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002893 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2894 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002895
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002896 tmp = thread->transaction_stack;
2897 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002898 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302899 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002900 proc->pid, thread->pid, tmp->debug_id,
2901 tmp->to_proc ? tmp->to_proc->pid : 0,
2902 tmp->to_thread ?
2903 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002904 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002905 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002906 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002907 return_error_param = -EPROTO;
2908 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002909 goto err_bad_call_stack;
2910 }
2911 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002912 struct binder_thread *from;
2913
2914 spin_lock(&tmp->lock);
2915 from = tmp->from;
2916 if (from && from->proc == target_proc) {
2917 atomic_inc(&from->tmp_ref);
2918 target_thread = from;
2919 spin_unlock(&tmp->lock);
2920 break;
2921 }
2922 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002923 tmp = tmp->from_parent;
2924 }
2925 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002926 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002927 }
Martijn Coenen053be422017-06-06 15:17:46 -07002928 if (target_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002929 e->to_thread = target_thread->pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002930 e->to_proc = target_proc->pid;
2931
2932 /* TODO: reuse incoming transaction for reply */
2933 t = kzalloc(sizeof(*t), GFP_KERNEL);
2934 if (t == NULL) {
2935 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002936 return_error_param = -ENOMEM;
2937 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002938 goto err_alloc_t_failed;
2939 }
2940 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07002941 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002942
2943 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2944 if (tcomplete == NULL) {
2945 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002946 return_error_param = -ENOMEM;
2947 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002948 goto err_alloc_tcomplete_failed;
2949 }
2950 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2951
Todd Kjos1cfe6272017-05-24 13:33:28 -07002952 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002953
2954 if (reply)
2955 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002956 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002957 proc->pid, thread->pid, t->debug_id,
2958 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002959 (u64)tr->data.ptr.buffer,
2960 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002961 (u64)tr->data_size, (u64)tr->offsets_size,
2962 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002963 else
2964 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002965 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002966 proc->pid, thread->pid, t->debug_id,
2967 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002968 (u64)tr->data.ptr.buffer,
2969 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002970 (u64)tr->data_size, (u64)tr->offsets_size,
2971 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002972
2973 if (!reply && !(tr->flags & TF_ONE_WAY))
2974 t->from = thread;
2975 else
2976 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03002977 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002978 t->to_proc = target_proc;
2979 t->to_thread = target_thread;
2980 t->code = tr->code;
2981 t->flags = tr->flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07002982 if (!(t->flags & TF_ONE_WAY) &&
2983 binder_supported_policy(current->policy)) {
2984 /* Inherit supported policies for synchronous transactions */
2985 t->priority.sched_policy = current->policy;
2986 t->priority.prio = current->normal_prio;
2987 } else {
2988 /* Otherwise, fall back to the default priority */
2989 t->priority = target_proc->default_priority;
2990 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002991
2992 trace_binder_transaction(reply, t, target_node);
2993
Todd Kjosd325d372016-10-10 10:40:53 -07002994 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02002995 tr->offsets_size, extra_buffers_size,
2996 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07002997 if (IS_ERR(t->buffer)) {
2998 /*
2999 * -ESRCH indicates VMA cleared. The target is dying.
3000 */
3001 return_error_param = PTR_ERR(t->buffer);
3002 return_error = return_error_param == -ESRCH ?
3003 BR_DEAD_REPLY : BR_FAILED_REPLY;
3004 return_error_line = __LINE__;
3005 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003006 goto err_binder_alloc_buf_failed;
3007 }
3008 t->buffer->allow_user_free = 0;
3009 t->buffer->debug_id = t->debug_id;
3010 t->buffer->transaction = t;
3011 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003012 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003013 off_start = (binder_size_t *)(t->buffer->data +
3014 ALIGN(tr->data_size, sizeof(void *)));
3015 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003016
Arve Hjønnevågda498892014-02-21 14:40:26 -08003017 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3018 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303019 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3020 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003021 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003022 return_error_param = -EFAULT;
3023 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003024 goto err_copy_data_failed;
3025 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003026 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3027 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303028 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3029 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003030 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003031 return_error_param = -EFAULT;
3032 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003033 goto err_copy_data_failed;
3034 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003035 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3036 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3037 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003038 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003039 return_error_param = -EINVAL;
3040 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003041 goto err_bad_offset;
3042 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02003043 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3044 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3045 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05303046 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003047 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003048 return_error_param = -EINVAL;
3049 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003050 goto err_bad_offset;
3051 }
3052 off_end = (void *)off_start + tr->offsets_size;
3053 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3054 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003055 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003056 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02003057 struct binder_object_header *hdr;
3058 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09003059
Martijn Coenen00c80372016-07-13 12:06:49 +02003060 if (object_size == 0 || *offp < off_min) {
3061 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003062 proc->pid, thread->pid, (u64)*offp,
3063 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02003064 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003065 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003066 return_error_param = -EINVAL;
3067 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003068 goto err_bad_offset;
3069 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003070
3071 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3072 off_min = *offp + object_size;
3073 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003074 case BINDER_TYPE_BINDER:
3075 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003076 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003077
Martijn Coenen00c80372016-07-13 12:06:49 +02003078 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003079 ret = binder_translate_binder(fp, t, thread);
3080 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02003081 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003082 return_error_param = ret;
3083 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003084 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003085 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003086 } break;
3087 case BINDER_TYPE_HANDLE:
3088 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003089 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003090
Martijn Coenen00c80372016-07-13 12:06:49 +02003091 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003092 ret = binder_translate_handle(fp, t, thread);
3093 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003094 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003095 return_error_param = ret;
3096 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003097 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003098 }
3099 } break;
3100
3101 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003102 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003103 int target_fd = binder_translate_fd(fp->fd, t, thread,
3104 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003105
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003106 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003107 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003108 return_error_param = target_fd;
3109 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003110 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003111 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003112 fp->pad_binder = 0;
3113 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003114 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003115 case BINDER_TYPE_FDA: {
3116 struct binder_fd_array_object *fda =
3117 to_binder_fd_array_object(hdr);
3118 struct binder_buffer_object *parent =
3119 binder_validate_ptr(t->buffer, fda->parent,
3120 off_start,
3121 offp - off_start);
3122 if (!parent) {
3123 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3124 proc->pid, thread->pid);
3125 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003126 return_error_param = -EINVAL;
3127 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003128 goto err_bad_parent;
3129 }
3130 if (!binder_validate_fixup(t->buffer, off_start,
3131 parent, fda->parent_offset,
3132 last_fixup_obj,
3133 last_fixup_min_off)) {
3134 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3135 proc->pid, thread->pid);
3136 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003137 return_error_param = -EINVAL;
3138 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003139 goto err_bad_parent;
3140 }
3141 ret = binder_translate_fd_array(fda, parent, t, thread,
3142 in_reply_to);
3143 if (ret < 0) {
3144 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003145 return_error_param = ret;
3146 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003147 goto err_translate_failed;
3148 }
3149 last_fixup_obj = parent;
3150 last_fixup_min_off =
3151 fda->parent_offset + sizeof(u32) * fda->num_fds;
3152 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003153 case BINDER_TYPE_PTR: {
3154 struct binder_buffer_object *bp =
3155 to_binder_buffer_object(hdr);
3156 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003157
Martijn Coenen5a6da532016-09-30 14:10:07 +02003158 if (bp->length > buf_left) {
3159 binder_user_error("%d:%d got transaction with too large buffer\n",
3160 proc->pid, thread->pid);
3161 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003162 return_error_param = -EINVAL;
3163 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003164 goto err_bad_offset;
3165 }
3166 if (copy_from_user(sg_bufp,
3167 (const void __user *)(uintptr_t)
3168 bp->buffer, bp->length)) {
3169 binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
3170 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07003171 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003172 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003173 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003174 goto err_copy_data_failed;
3175 }
3176 /* Fix up the buffer pointer to the target proc's address space */
3177 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07003178 binder_alloc_get_user_buffer_offset(
3179 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003180 sg_bufp += ALIGN(bp->length, sizeof(u64));
3181
3182 ret = binder_fixup_parent(t, thread, bp, off_start,
3183 offp - off_start,
3184 last_fixup_obj,
3185 last_fixup_min_off);
3186 if (ret < 0) {
3187 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003188 return_error_param = ret;
3189 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003190 goto err_translate_failed;
3191 }
3192 last_fixup_obj = bp;
3193 last_fixup_min_off = 0;
3194 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003195 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01003196 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02003197 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003198 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003199 return_error_param = -EINVAL;
3200 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003201 goto err_bad_object_type;
3202 }
3203 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003204 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003205 binder_enqueue_work(proc, tcomplete, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003206 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003207
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003208 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003209 binder_inner_proc_lock(target_proc);
3210 if (target_thread->is_dead) {
3211 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003212 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003213 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003214 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003215 binder_pop_transaction_ilocked(target_thread, in_reply_to);
Martijn Coenen053be422017-06-06 15:17:46 -07003216 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003217 binder_inner_proc_unlock(target_proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003218 wake_up_interruptible_sync(&target_thread->wait);
Martijn Coenenecd972d2017-05-26 10:48:56 -07003219 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003220 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003221 } else if (!(t->flags & TF_ONE_WAY)) {
3222 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003223 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003224 t->need_reply = 1;
3225 t->from_parent = thread->transaction_stack;
3226 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003227 binder_inner_proc_unlock(proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003228 if (!binder_proc_transaction(t, target_proc, target_thread)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003229 binder_inner_proc_lock(proc);
3230 binder_pop_transaction_ilocked(thread, t);
3231 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003232 goto err_dead_proc_or_thread;
3233 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003234 } else {
3235 BUG_ON(target_node == NULL);
3236 BUG_ON(t->buffer->async_transaction != 1);
Martijn Coenen053be422017-06-06 15:17:46 -07003237 if (!binder_proc_transaction(t, target_proc, NULL))
Todd Kjos2f993e22017-05-12 14:42:55 -07003238 goto err_dead_proc_or_thread;
Riley Andrewsb5968812015-09-01 12:42:07 -07003239 }
Todd Kjos2f993e22017-05-12 14:42:55 -07003240 if (target_thread)
3241 binder_thread_dec_tmpref(target_thread);
3242 binder_proc_dec_tmpref(target_proc);
Todd Kjos1cfe6272017-05-24 13:33:28 -07003243 /*
3244 * write barrier to synchronize with initialization
3245 * of log entry
3246 */
3247 smp_wmb();
3248 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003249 return;
3250
Todd Kjos2f993e22017-05-12 14:42:55 -07003251err_dead_proc_or_thread:
3252 return_error = BR_DEAD_REPLY;
3253 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003254err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003255err_bad_object_type:
3256err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003257err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003258err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003259 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003260 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjosc37162d2017-05-26 11:56:29 -07003261 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003262 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07003263 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003264err_binder_alloc_buf_failed:
3265 kfree(tcomplete);
3266 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3267err_alloc_tcomplete_failed:
3268 kfree(t);
3269 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3270err_alloc_t_failed:
3271err_bad_call_stack:
3272err_empty_call_stack:
3273err_dead_binder:
3274err_invalid_target_handle:
3275err_no_context_mgr_node:
Todd Kjos2f993e22017-05-12 14:42:55 -07003276 if (target_thread)
3277 binder_thread_dec_tmpref(target_thread);
3278 if (target_proc)
3279 binder_proc_dec_tmpref(target_proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07003280 if (target_node)
3281 binder_dec_node(target_node, 1, 0);
3282
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003283 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07003284 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3285 proc->pid, thread->pid, return_error, return_error_param,
3286 (u64)tr->data_size, (u64)tr->offsets_size,
3287 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003288
3289 {
3290 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09003291
Todd Kjose598d172017-03-22 17:19:52 -07003292 e->return_error = return_error;
3293 e->return_error_param = return_error_param;
3294 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003295 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3296 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07003297 /*
3298 * write barrier to synchronize with initialization
3299 * of log entry
3300 */
3301 smp_wmb();
3302 WRITE_ONCE(e->debug_id_done, t_debug_id);
3303 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003304 }
3305
Todd Kjos858b8da2017-04-21 17:35:12 -07003306 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003307 if (in_reply_to) {
Martijn Coenenecd972d2017-05-26 10:48:56 -07003308 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos858b8da2017-04-21 17:35:12 -07003309 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003310 binder_enqueue_work(thread->proc,
3311 &thread->return_error.work,
3312 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003313 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07003314 } else {
3315 thread->return_error.cmd = return_error;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003316 binder_enqueue_work(thread->proc,
3317 &thread->return_error.work,
3318 &thread->todo);
Todd Kjos858b8da2017-04-21 17:35:12 -07003319 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003320}
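/*
 * Illustrative sketch (editor's addition): the command/return flow
 * that binder_transaction() implements for a synchronous call:
 *
 *	sender				target
 *	------				------
 *	BC_TRANSACTION --->
 *	<--- BR_TRANSACTION_COMPLETE
 *					<--- BR_TRANSACTION
 *					BC_REPLY --->
 *					<--- BR_TRANSACTION_COMPLETE
 *	<--- BR_REPLY
 *
 * A oneway (TF_ONE_WAY) call ends after the sender's
 * BR_TRANSACTION_COMPLETE; no reply leg runs.
 */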
3321
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003322static int binder_thread_write(struct binder_proc *proc,
3323 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003324 binder_uintptr_t binder_buffer, size_t size,
3325 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003326{
3327 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003328 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003329 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003330 void __user *ptr = buffer + *consumed;
3331 void __user *end = buffer + size;
3332
Todd Kjos858b8da2017-04-21 17:35:12 -07003333 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003334 int ret;
3335
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003336 if (get_user(cmd, (uint32_t __user *)ptr))
3337 return -EFAULT;
3338 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003339 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003340 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003341 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3342 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3343 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003344 }
3345 switch (cmd) {
3346 case BC_INCREFS:
3347 case BC_ACQUIRE:
3348 case BC_RELEASE:
3349 case BC_DECREFS: {
3350 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003351 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003352 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3353 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3354 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003355
3356 if (get_user(target, (uint32_t __user *)ptr))
3357 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003358
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003359 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003360 ret = -1;
3361 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003362 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003363 mutex_lock(&context->context_mgr_node_lock);
3364 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003365 if (ctx_mgr_node)
3366 ret = binder_inc_ref_for_node(
3367 proc, ctx_mgr_node,
3368 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003369 mutex_unlock(&context->context_mgr_node_lock);
3370 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003371 if (ret)
3372 ret = binder_update_ref_for_handle(
3373 proc, target, increment, strong,
3374 &rdata);
3375 if (!ret && rdata.desc != target) {
3376 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3377 proc->pid, thread->pid,
3378 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003379 }
3380 switch (cmd) {
3381 case BC_INCREFS:
3382 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003383 break;
3384 case BC_ACQUIRE:
3385 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003386 break;
3387 case BC_RELEASE:
3388 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003389 break;
3390 case BC_DECREFS:
3391 default:
3392 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003393 break;
3394 }
3395 if (ret) {
3396 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3397 proc->pid, thread->pid, debug_string,
3398 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003399 break;
3400 }
3401 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003402 "%d:%d %s ref %d desc %d s %d w %d\n",
3403 proc->pid, thread->pid, debug_string,
3404 rdata.debug_id, rdata.desc, rdata.strong,
3405 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003406 break;
3407 }
3408 case BC_INCREFS_DONE:
3409 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003410 binder_uintptr_t node_ptr;
3411 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003412 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003413 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003414
Arve Hjønnevågda498892014-02-21 14:40:26 -08003415 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003416 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003417 ptr += sizeof(binder_uintptr_t);
3418 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003419 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003420 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003421 node = binder_get_node(proc, node_ptr);
3422 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003423 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003424 proc->pid, thread->pid,
3425 cmd == BC_INCREFS_DONE ?
3426 "BC_INCREFS_DONE" :
3427 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003428 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003429 break;
3430 }
3431 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003432 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003433 proc->pid, thread->pid,
3434 cmd == BC_INCREFS_DONE ?
3435 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003436 (u64)node_ptr, node->debug_id,
3437 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003438 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003439 break;
3440 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003441 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003442 if (cmd == BC_ACQUIRE_DONE) {
3443 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303444 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003445 proc->pid, thread->pid,
3446 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003447 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003448 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003449 break;
3450 }
3451 node->pending_strong_ref = 0;
3452 } else {
3453 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303454 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003455 proc->pid, thread->pid,
3456 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003457 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003458 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003459 break;
3460 }
3461 node->pending_weak_ref = 0;
3462 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003463 free_node = binder_dec_node_nilocked(node,
3464 cmd == BC_ACQUIRE_DONE, 0);
3465 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003466 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003467 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003468 proc->pid, thread->pid,
3469 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003470 node->debug_id, node->local_strong_refs,
3471 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003472 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003473 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003474 break;
3475 }
3476 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303477 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003478 return -EINVAL;
3479 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303480 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003481 return -EINVAL;
3482
3483 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003484 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003485 struct binder_buffer *buffer;
3486
Arve Hjønnevågda498892014-02-21 14:40:26 -08003487 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003488 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003489 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003490
Todd Kjos076072a2017-04-21 14:32:11 -07003491 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3492 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003493 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003494 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3495 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003496 break;
3497 }
3498 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003499 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3500 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003501 break;
3502 }
3503 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003504 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3505 proc->pid, thread->pid, (u64)data_ptr,
3506 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003507 buffer->transaction ? "active" : "finished");
3508
3509 if (buffer->transaction) {
3510 buffer->transaction->buffer = NULL;
3511 buffer->transaction = NULL;
3512 }
3513 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003514 struct binder_node *buf_node;
3515 struct binder_work *w;
3516
3517 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003518 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003519 BUG_ON(!buf_node->has_async_transaction);
3520 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003521 w = binder_dequeue_work_head_ilocked(
3522 &buf_node->async_todo);
3523 if (!w)
3524 buf_node->has_async_transaction = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003525 else
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003526 binder_enqueue_work_ilocked(
3527 w, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003528 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003529 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003530 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003531 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003532 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003533 break;
3534 }
3535
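		/*
		 * Editor's note: the _SG variants carry a
		 * struct binder_transaction_data_sg, i.e. the ordinary
		 * transaction data plus buffers_size, the total size of
		 * the extra scatter-gather buffers that BINDER_TYPE_PTR
		 * objects point into (passed on as extra_buffers_size).
		 */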
Martijn Coenen5a6da532016-09-30 14:10:07 +02003536 case BC_TRANSACTION_SG:
3537 case BC_REPLY_SG: {
3538 struct binder_transaction_data_sg tr;
3539
3540 if (copy_from_user(&tr, ptr, sizeof(tr)))
3541 return -EFAULT;
3542 ptr += sizeof(tr);
3543 binder_transaction(proc, thread, &tr.transaction_data,
3544 cmd == BC_REPLY_SG, tr.buffers_size);
3545 break;
3546 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003547 case BC_TRANSACTION:
3548 case BC_REPLY: {
3549 struct binder_transaction_data tr;
3550
3551 if (copy_from_user(&tr, ptr, sizeof(tr)))
3552 return -EFAULT;
3553 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003554 binder_transaction(proc, thread, &tr,
3555 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003556 break;
3557 }
3558
3559 case BC_REGISTER_LOOPER:
3560 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303561 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003562 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003563 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003564 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3565 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303566 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003567 proc->pid, thread->pid);
3568 } else if (proc->requested_threads == 0) {
3569 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303570 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003571 proc->pid, thread->pid);
3572 } else {
3573 proc->requested_threads--;
3574 proc->requested_threads_started++;
3575 }
3576 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003577 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003578 break;
3579 case BC_ENTER_LOOPER:
3580 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303581 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003582 proc->pid, thread->pid);
3583 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3584 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303585 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003586 proc->pid, thread->pid);
3587 }
3588 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3589 break;
3590 case BC_EXIT_LOOPER:
3591 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303592 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003593 proc->pid, thread->pid);
3594 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3595 break;
3596
3597 case BC_REQUEST_DEATH_NOTIFICATION:
3598 case BC_CLEAR_DEATH_NOTIFICATION: {
3599 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003600 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003601 struct binder_ref *ref;
Todd Kjos5346bf32016-10-20 16:43:34 -07003602 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003603
3604 if (get_user(target, (uint32_t __user *)ptr))
3605 return -EFAULT;
3606 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003607 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003608 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003609 ptr += sizeof(binder_uintptr_t);
Todd Kjos5346bf32016-10-20 16:43:34 -07003610 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3611 /*
3612 * Allocate memory for death notification
3613 * before taking lock
3614 */
3615 death = kzalloc(sizeof(*death), GFP_KERNEL);
3616 if (death == NULL) {
3617 WARN_ON(thread->return_error.cmd !=
3618 BR_OK);
3619 thread->return_error.cmd = BR_ERROR;
3620 binder_enqueue_work(
3621 thread->proc,
3622 &thread->return_error.work,
3623 &thread->todo);
3624 binder_debug(
3625 BINDER_DEBUG_FAILED_TRANSACTION,
3626 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3627 proc->pid, thread->pid);
3628 break;
3629 }
3630 }
3631 binder_proc_lock(proc);
3632 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003633 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303634 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003635 proc->pid, thread->pid,
3636 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3637 "BC_REQUEST_DEATH_NOTIFICATION" :
3638 "BC_CLEAR_DEATH_NOTIFICATION",
3639 target);
Todd Kjos5346bf32016-10-20 16:43:34 -07003640 binder_proc_unlock(proc);
3641 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003642 break;
3643 }
3644
3645 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003646 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003647 proc->pid, thread->pid,
3648 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3649 "BC_REQUEST_DEATH_NOTIFICATION" :
3650 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003651 (u64)cookie, ref->data.debug_id,
3652 ref->data.desc, ref->data.strong,
3653 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003654
Martijn Coenenf9eac642017-05-22 11:26:23 -07003655 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003656 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3657 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303658 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003659 proc->pid, thread->pid);
Martijn Coenenf9eac642017-05-22 11:26:23 -07003660 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003661 binder_proc_unlock(proc);
3662 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003663 break;
3664 }
3665 binder_stats_created(BINDER_STAT_DEATH);
3666 INIT_LIST_HEAD(&death->work.entry);
3667 death->cookie = cookie;
3668 ref->death = death;
3669 if (ref->node->proc == NULL) {
3670 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003671 if (thread->looper &
3672 (BINDER_LOOPER_STATE_REGISTERED |
3673 BINDER_LOOPER_STATE_ENTERED))
3674 binder_enqueue_work(
3675 proc,
3676 &ref->death->work,
3677 &thread->todo);
3678 else {
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003679 binder_inner_proc_lock(proc);
3680 binder_enqueue_work_ilocked(
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003681 &ref->death->work,
3682 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003683 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003684 proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003685 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003686 }
3687 }
3688 } else {
3689 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303690 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003691 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003692 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003693 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003694 break;
3695 }
3696 death = ref->death;
3697 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003698 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003699 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003700 (u64)death->cookie,
3701 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003702 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003703 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003704 break;
3705 }
3706 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003707 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003708 if (list_empty(&death->work.entry)) {
3709 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003710 if (thread->looper &
3711 (BINDER_LOOPER_STATE_REGISTERED |
3712 BINDER_LOOPER_STATE_ENTERED))
3713 binder_enqueue_work_ilocked(
3714 &death->work,
3715 &thread->todo);
3716 else {
3717 binder_enqueue_work_ilocked(
3718 &death->work,
3719 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003720 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003721 proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003722 }
3723 } else {
3724 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3725 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3726 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003727 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003728 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07003729 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003730 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003731 } break;
	case BC_DEAD_BINDER_DONE: {
		struct binder_work *w;
		binder_uintptr_t cookie;
		struct binder_ref_death *death = NULL;

		if (get_user(cookie, (binder_uintptr_t __user *)ptr))
			return -EFAULT;

		ptr += sizeof(cookie);
		binder_inner_proc_lock(proc);
		list_for_each_entry(w, &proc->delivered_death,
				    entry) {
			struct binder_ref_death *tmp_death =
				container_of(w,
					     struct binder_ref_death,
					     work);

			if (tmp_death->cookie == cookie) {
				death = tmp_death;
				break;
			}
		}
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
			     proc->pid, thread->pid, (u64)cookie,
			     death);
		if (death == NULL) {
			binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					  proc->pid, thread->pid, (u64)cookie);
			binder_inner_proc_unlock(proc);
			break;
		}
		binder_dequeue_work_ilocked(&death->work);
		if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
			death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
			if (thread->looper &
			    (BINDER_LOOPER_STATE_REGISTERED |
			     BINDER_LOOPER_STATE_ENTERED))
				binder_enqueue_work_ilocked(
						&death->work, &thread->todo);
			else {
				binder_enqueue_work_ilocked(
						&death->work,
						&proc->todo);
				binder_wakeup_proc_ilocked(proc);
			}
		}
		binder_inner_proc_unlock(proc);
	} break;

	default:
		pr_err("%d:%d unknown command %d\n",
		       proc->pid, thread->pid, cmd);
		return -EINVAL;
	}
	*consumed = ptr - buffer;
	}
	return 0;
}

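/* Trace and count a BR_* return command in the global, per-process and
 * per-thread stats.
 */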
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_has_thread_work(struct binder_thread *thread)
{
	return !binder_worklist_empty(thread->proc, &thread->todo) ||
		thread->looper_need_return;
}

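/**
 * binder_put_node_cmd() - write a node ref-count command to the read buffer
 * @proc:          process receiving the command
 * @thread:        thread receiving the command
 * @ptrp:          in/out pointer into the userspace read buffer
 * @node_ptr:      userspace pointer of the node
 * @node_cookie:   userspace cookie of the node
 * @node_debug_id: node debug id, used only for logging
 * @cmd:           BR_INCREFS, BR_ACQUIRE, BR_RELEASE or BR_DECREFS
 * @cmd_name:      command name for debug logging
 *
 * Copies @cmd plus the node's ptr/cookie pair to userspace and advances
 * *@ptrp past what was written.
 *
 * Return: 0 on success, -EFAULT if the user buffer cannot be written.
 */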
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

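/**
 * binder_wait_for_work() - sleep until there is work for this thread
 * @thread:       thread that is waiting
 * @do_proc_work: whether the thread may also handle process-wide work
 *
 * Sleeps (freezably) until binder_has_work_ilocked() reports work.  If
 * @do_proc_work is true the thread is added to proc->waiting_threads
 * while it sleeps so it can be picked to handle process work.
 *
 * Return: 0 when work is available, -ERESTARTSYS if a signal arrived.
 */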
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

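/*
 * binder_thread_read() - fill a userspace read buffer with BR_* commands.
 * Writes a leading BR_NOOP on a fresh buffer, waits for work (honoring
 * O_NONBLOCK via @non_block), then drains thread->todo -- or proc->todo
 * when the thread was marked available for process work -- translating
 * each queued work item into BR_* commands.  Before returning it may
 * append BR_SPAWN_LOOPER to ask userspace for another looper thread.
 */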
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;	/* save the command before it is reset */
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

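/**
 * binder_release_work() - flush and free all work items on a list
 * @proc: process owning @list
 * @list: todo list being torn down
 *
 * Dequeues every remaining work item and releases it: undelivered
 * two-way transactions get a BR_DEAD_REPLY (one-way ones are freed),
 * while completion and death-notification records are simply freed.
 */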
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					     "undelivered transaction %d\n",
					     t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_ERROR: %u\n",
				     e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered death notification, %016llx\n",
				     (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

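/*
 * Find the binder_thread for current in proc->threads (an rbtree keyed
 * by pid).  When no entry exists and @new_thread was supplied, link
 * @new_thread into the tree and initialize it.  The caller must hold
 * the proc inner lock; binder_get_thread() below wraps this, doing the
 * GFP_KERNEL allocation outside of the lock.
 */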
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

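/*
 * Final teardown of a binder_proc: by this point its todo and
 * delivered_death lists must already be empty.  Releases the allocator
 * state and the task reference taken in binder_open().
 */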
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}

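/**
 * binder_thread_release() - detach a thread and abort its transactions
 * @proc:   process owning @thread
 * @thread: thread that is exiting
 *
 * Removes the thread from proc->threads, marks it dead, walks its
 * transaction stack severing each entry (sending BR_DEAD_REPLY for a
 * pending synchronous transaction targeted at this thread) and flushes
 * its todo list.
 *
 * Return: the number of transactions that were still active.
 */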
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	binder_inner_proc_unlock(thread->proc);

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

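/*
 * poll() support: the polling thread registers itself as available for
 * process work and reports POLLIN as soon as thread or process work is
 * queued.
 */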
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)	/* thread allocation failed */
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(thread->proc);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_thread_work(thread))
		return POLLIN;

	return 0;
}

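/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:   binder file
 * @cmd:    ioctl command (used only to validate the argument size)
 * @arg:    userspace pointer to a struct binder_write_read
 * @thread: calling thread
 *
 * Copies in the binder_write_read block, processes the write buffer via
 * binder_thread_write(), then fills the read buffer via
 * binder_thread_read().  The updated consumed counts are copied back to
 * userspace even on failure so the caller can tell how far it got.
 */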
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

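/*
 * BINDER_SET_CONTEXT_MGR: register the calling process as the context
 * manager (typically servicemanager) for this binder context.  Only
 * one manager may exist per context, the caller must pass the security
 * hook, and the manager euid is pinned on first use so any later
 * attempt must come from the same euid.
 */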
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

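/*
 * BINDER_GET_NODE_DEBUG_INFO: report the first node of this process
 * whose userspace pointer is strictly greater than info->ptr, so
 * userspace can iterate over all nodes by passing back the last ptr
 * it saw.
 */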
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

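/*
 * Main ioctl dispatcher: looks up (or creates) the binder_thread for
 * the calling task, then handles BINDER_WRITE_READ, thread and context
 * management, and the version/debug queries.
 */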
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

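/*
 * mmap() for a binder device: maps the buffer area (clamped to 4MB)
 * used to deliver incoming transaction data.  Forbidden flags are
 * rejected and VM_MAYWRITE is cleared so userspace can never make the
 * mapping writable; page management is delegated to binder_alloc.
 */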
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

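/*
 * open() for a binder device: allocate and initialize the binder_proc,
 * inherit the caller's scheduling policy as the default transaction
 * priority (falling back to SCHED_NORMAL/nice 0), hook the proc into
 * the global list and create its debugfs entry.
 */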
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

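/**
 * binder_node_release() - release a node whose owning process is dying
 * @node: node being released
 * @refs: running count of released references
 *
 * If nothing else references the node it is freed outright; otherwise
 * it is moved to the global dead-nodes list and BINDER_WORK_DEAD_BINDER
 * is queued for every reference that requested a death notification.
 *
 * Return: the updated reference count, for the release statistics.
 */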
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

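/*
 * Deferred-work path of release(): tears down a dying process.
 * Unregisters the context manager if this process owned it, releases
 * every thread, node and reference, flushes the remaining work lists,
 * and drops the temporary proc ref so binder_free_proc() can run once
 * all outstanding users are gone.
 */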
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004927static void binder_deferred_release(struct binder_proc *proc)
4928{
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004929 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004930 struct rb_node *n;
Todd Kjosd325d372016-10-10 10:40:53 -07004931 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004932
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004933 BUG_ON(proc->files);
4934
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004935 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004936 hlist_del(&proc->proc_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004937 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004938
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004939 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004940 if (context->binder_context_mgr_node &&
4941 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004942 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004943 "%s: %d context_mgr_node gone\n",
4944 __func__, proc->pid);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004945 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004946 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004947 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjosb4827902017-05-25 15:52:17 -07004948 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004949 /*
4950 * Make sure proc stays alive after we
4951 * remove all the threads
4952 */
4953 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004954
Todd Kjos2f993e22017-05-12 14:42:55 -07004955 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004956 threads = 0;
4957 active_transactions = 0;
4958 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004959 struct binder_thread *thread;
4960
4961 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004962 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004963 threads++;
Todd Kjos2f993e22017-05-12 14:42:55 -07004964 active_transactions += binder_thread_release(proc, thread);
Todd Kjosb4827902017-05-25 15:52:17 -07004965 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004966 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004967
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004968 nodes = 0;
4969 incoming_refs = 0;
4970 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004971 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004972
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004973 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004974 nodes++;
Todd Kjosf22abc72017-05-09 11:08:05 -07004975 /*
4976 * take a temporary ref on the node before
4977 * calling binder_node_release() which will either
4978 * kfree() the node or call binder_put_node()
4979 */
Todd Kjos425d23f2017-06-12 12:07:26 -07004980 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004981 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjos425d23f2017-06-12 12:07:26 -07004982 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004983 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjos425d23f2017-06-12 12:07:26 -07004984 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004985 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004986 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004987
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004988 outgoing_refs = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07004989 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004990 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004991 struct binder_ref *ref;
4992
4993 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004994 outgoing_refs++;
Todd Kjos5346bf32016-10-20 16:43:34 -07004995 binder_cleanup_ref_olocked(ref);
4996 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07004997 binder_free_ref(ref);
Todd Kjos5346bf32016-10-20 16:43:34 -07004998 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004999 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005000 binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

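/*
 * Workqueue handler that drains binder_deferred_list. Each queued
 * binder_proc is removed under binder_deferred_lock, its pending
 * deferred-work bits are latched and cleared, and the corresponding
 * actions (put files, flush, release) then run without the lock held.
 */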
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

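/*
 * Queue deferred work for @proc. The defer bits accumulate in
 * proc->deferred_work; the proc is added to binder_deferred_list only
 * if it is not already queued, so at most one list entry exists per
 * proc no matter how many times this is called before the work runs.
 */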
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

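/*
 * Print one transaction for the debugfs state files. The caller must
 * hold proc->inner_lock; t->lock is held while the header line is
 * printed so the target proc/thread cannot change underneath. The
 * buffer is only dereferenced when the transaction targets @proc
 * itself, since only then does the held inner lock protect it.
 */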
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	WARN_ON(!spin_is_locked(&proc->inner_lock));
	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

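/*
 * Print one binder_work item from a todo list, dispatching on w->type
 * to recover the containing object (transaction, error, or node).
 * The caller must hold the inner lock of @proc, the process owning
 * the list being walked.
 */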
static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

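/*
 * Print one thread: its looper state, its transaction stack (each
 * entry classified as outgoing, incoming, or bad depending on how the
 * thread is linked to it), and pending todo entries. If @print_always
 * is false and nothing was printed past the header, the seq_file
 * output is rewound so idle threads are omitted.
 */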
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

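/*
 * Print one node with its reference counts and the pids of all
 * processes holding refs on it. Callers must hold node->lock and, for
 * live nodes, the owning proc's inner lock (hence the _nilocked
 * suffix); dead nodes have no proc, so only node->lock applies.
 */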
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	WARN_ON(!spin_is_locked(&node->lock));
	if (node->proc)
		WARN_ON(!spin_is_locked(&node->proc->inner_lock));

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

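/*
 * Print one ref. The caller must hold the owning proc's outer lock;
 * node->lock is taken here so the node's proc pointer can be checked
 * safely to report whether the target node is dead.
 */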
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

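/*
 * Print the full state of one process: threads, nodes, refs (refs only
 * when @print_all is set), allocated buffers, and pending work. Nodes
 * are pinned with a temporary ref and the inner lock is dropped around
 * each node print because binder_node_inner_lock() must take
 * node->lock before the inner lock; last_node defers the put until the
 * inner lock has been reacquired.
 */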
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

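/*
 * Human-readable names for the BR_* return codes, BC_* command codes,
 * and object-stat slots printed by print_binder_stats(). Each table
 * must stay in index order with the corresponding enum in
 * uapi/linux/android/binder.h; the BUILD_BUG_ON checks below catch
 * size mismatches but not reordering.
 */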
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

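/*
 * Print the non-zero counters from @stats: commands received (bc),
 * returns delivered (br), and per-object-type created/deleted counts,
 * where "active" is created minus deleted. The counters are atomics
 * read without additional locking, so the snapshot is approximate.
 */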
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

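/*
 * Print per-process summary counts for the debugfs "stats" file:
 * thread, ready-thread, and node totals under the inner lock, ref
 * counts under the outer lock, buffer counts from the allocator, and
 * the number of pending transactions on proc->todo.
 */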
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

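/*
 * seq_file show function for debugfs "state": prints all dead nodes,
 * then every process. Dead nodes are pinned via tmp_refs while
 * binder_dead_nodes_lock is dropped to print them, mirroring the
 * last_node pattern used in print_binder_proc().
 */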
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

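/*
 * seq_file show function for the (failed_)transaction_log files. The
 * log is a ring buffer indexed by an atomic cursor: if it has wrapped
 * (log->full), printing starts at the oldest slot and covers every
 * entry; otherwise it starts at slot 0 and covers cur + 1 entries.
 */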
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

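/*
 * Allocate and register one binder device node (/dev/<name>) as a misc
 * char device sharing binder_fops, with its own binder_context. On
 * success the device is added to the global binder_devices list.
 */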
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

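/*
 * Module init: reset the transaction log cursors, create the debugfs
 * tree (state, stats, transactions, transaction_log,
 * failed_transaction_log), then register one binder device per
 * comma-separated name in the binder_devices_param module parameter,
 * unwinding the registered devices and debugfs entries on failure.
 */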
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");