/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
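
/*
 * For reference, BINDER_DEBUG_ENTRY(proc) above expands (roughly) to:
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = { ... };
 *
 * i.e. a read-only seq_file wrapper used when the debugfs entries for
 * this driver are created.
 */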

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

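/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * given the generic header of an object found in a transaction buffer,
 * the container_of() wrappers above recover the full typed object.
 * Callers are expected to check hdr->type first, as sketched here.
 */
static __maybe_unused struct flat_binder_object *
binder_example_validate_fbo(struct binder_object_header *hdr)
{
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		return to_flat_binder_object(hdr);
	default:
		return NULL;
	}
}
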
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

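/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the br[] and bc[] arrays in struct binder_stats are indexed by the
 * ordinal number that _IOC_NR() extracts from a BR_ or BC_ protocol
 * code, so counting a return code looks like this.
 */
static inline void __maybe_unused binder_stats_br_example(uint32_t cmd)
{
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br))
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
}
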
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

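/*
 * Illustrative sketch of the matching reader side (hypothetical
 * helper; the real consumer is the debugfs show code later in this
 * file). The smp_rmb() pairs with the smp_wmb() in
 * binder_transaction_log_add(): if debug_id_done still reads 0, the
 * entry may be mid-update and its fields are not yet trustworthy.
 */
static bool __maybe_unused binder_transaction_log_entry_complete(
	struct binder_transaction_log_entry *e)
{
	int debug_id_done = READ_ONCE(e->debug_id_done);

	/* read debug_id_done before any of the other fields */
	smp_rmb();
	return debug_id_done != 0;
}
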
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio:         [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

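/*
 * Illustrative sketch (hypothetical helper, never called): the only
 * legal nesting order of the three spinlocks described in the locking
 * overview at the top of this file. A function that expects to be
 * entered with node->lock and proc->inner_lock already held would
 * carry the _nilocked suffix.
 */
static void __maybe_unused
binder_example_lock_order(struct binder_proc *proc, struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... access fields protected by these locks ... */

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}
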
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

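/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the deferred variant above is meant for work the target thread does
 * not need to wake up for, such as the completion notice of a oneway
 * call; process_todo is left unset, so the thread's next read can
 * still go to sleep until real work arrives.
 */
static void __maybe_unused
binder_example_queue_deferred(struct binder_thread *thread,
			      struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_deferred_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
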
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}

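/*
 * Worked examples of the mappings above, assuming the usual kernel
 * constants (MAX_USER_RT_PRIO == 100, NICE_TO_PRIO(0) == 120):
 *
 *	to_kernel_prio(SCHED_NORMAL, 0)  -> 120  (nice 0)
 *	to_kernel_prio(SCHED_FIFO,  99)  -> 0    (highest RT priority)
 *	to_userspace_prio(SCHED_RR,  1)  -> 98
 *
 * i.e. fair policies round-trip through nice values, while RT policies
 * invert the [0..99] user-space range.
 */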
static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			     task->pid, desired.prio,
			     to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}

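/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of
 * how the pair above is intended to be used: inherit a priority for
 * the duration of a transaction, then restore the value saved in the
 * transaction. Restore skips the RLIMIT checks (verify == false) so a
 * thread can always drop back to its own previous priority.
 */
static void __maybe_unused
binder_example_priority_scope(struct task_struct *task,
			      struct binder_transaction *t,
			      struct binder_priority inherited)
{
	binder_set_priority(task, inherited);
	/* ... process transaction t ... */
	binder_restore_priority(task, t->saved_priority);
}
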
static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1437 if (target_list == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301438 pr_err("invalid inc weak node for %d\n",
1439 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001440 return -EINVAL;
1441 }
Martijn Coenen1af61802017-10-19 15:04:46 +02001442 /*
1443 * See comment above
1444 */
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001445 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001446 }
1447 }
1448 return 0;
1449}
1450
Todd Kjose7f23ed2017-03-21 13:06:01 -07001451static int binder_inc_node(struct binder_node *node, int strong, int internal,
1452 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001453{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001454 int ret;
1455
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001456 binder_node_inner_lock(node);
1457 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1458 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001459
1460 return ret;
1461}
1462
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001463static bool binder_dec_node_nilocked(struct binder_node *node,
1464 int strong, int internal)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001465{
1466 struct binder_proc *proc = node->proc;
1467
Martijn Coenened323352017-07-27 23:52:24 +02001468 assert_spin_locked(&node->lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001469 if (proc)
Martijn Coenened323352017-07-27 23:52:24 +02001470 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001471 if (strong) {
1472 if (internal)
1473 node->internal_strong_refs--;
1474 else
1475 node->local_strong_refs--;
1476 if (node->local_strong_refs || node->internal_strong_refs)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001477 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001478 } else {
1479 if (!internal)
1480 node->local_weak_refs--;
Todd Kjosf22abc72017-05-09 11:08:05 -07001481 if (node->local_weak_refs || node->tmp_refs ||
1482 !hlist_empty(&node->refs))
Todd Kjose7f23ed2017-03-21 13:06:01 -07001483 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001484 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001485
1486 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001487 if (list_empty(&node->work.entry)) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001488 binder_enqueue_work_ilocked(&node->work, &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07001489 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001490 }
1491 } else {
1492 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
Todd Kjosf22abc72017-05-09 11:08:05 -07001493 !node->local_weak_refs && !node->tmp_refs) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07001494 if (proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001495 binder_dequeue_work_ilocked(&node->work);
1496 rb_erase(&node->rb_node, &proc->nodes);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001497 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301498 "refless node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001499 node->debug_id);
1500 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001501 BUG_ON(!list_empty(&node->work.entry));
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001502 spin_lock(&binder_dead_nodes_lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001503 /*
1504 * tmp_refs could have changed so
1505 * check it again
1506 */
1507 if (node->tmp_refs) {
1508 spin_unlock(&binder_dead_nodes_lock);
1509 return false;
1510 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001511 hlist_del(&node->dead_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001512 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001513 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301514 "dead node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001515 node->debug_id);
1516 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001517 return true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001518 }
1519 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001520 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001521}
1522
Todd Kjose7f23ed2017-03-21 13:06:01 -07001523static void binder_dec_node(struct binder_node *node, int strong, int internal)
1524{
1525 bool free_node;
1526
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001527 binder_node_inner_lock(node);
1528 free_node = binder_dec_node_nilocked(node, strong, internal);
1529 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001530 if (free_node)
1531 binder_free_node(node);
1532}
1533
1534static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
Todd Kjosf22abc72017-05-09 11:08:05 -07001535{
1536 /*
1537 * No call to binder_inc_node() is needed since we
1538 * don't need to inform userspace of any changes to
1539 * tmp_refs
1540 */
1541 node->tmp_refs++;
1542}
1543
1544/**
Todd Kjose7f23ed2017-03-21 13:06:01 -07001545 * binder_inc_node_tmpref() - take a temporary reference on node
1546 * @node: node to reference
1547 *
1548 * Take reference on node to prevent the node from being freed
1549 * while referenced only by a local variable. The inner lock is
1550 * needed to serialize with the node work on the queue (which
1551 * isn't needed after the node is dead). If the node is dead
1552 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1553 * node->tmp_refs against dead-node-only cases where the node
1554 * lock cannot be acquired (e.g. traversing the dead node list to
1555 * print nodes)
1556 */
1557static void binder_inc_node_tmpref(struct binder_node *node)
1558{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001559 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001560 if (node->proc)
1561 binder_inner_proc_lock(node->proc);
1562 else
1563 spin_lock(&binder_dead_nodes_lock);
1564 binder_inc_node_tmpref_ilocked(node);
1565 if (node->proc)
1566 binder_inner_proc_unlock(node->proc);
1567 else
1568 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001569 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001570}
1571
1572/**
Todd Kjosf22abc72017-05-09 11:08:05 -07001573 * binder_dec_node_tmpref() - remove a temporary reference on node
1574 * @node: node to reference
1575 *
1576 * Release temporary reference on node taken via binder_inc_node_tmpref()
1577 */
1578static void binder_dec_node_tmpref(struct binder_node *node)
1579{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001580 bool free_node;
1581
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001582 binder_node_inner_lock(node);
1583 if (!node->proc)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001584 spin_lock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001585 node->tmp_refs--;
1586 BUG_ON(node->tmp_refs < 0);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001587 if (!node->proc)
1588 spin_unlock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001589 /*
1590	 * Call binder_dec_node_nilocked() to check if all refcounts are 0
1591	 * and cleanup is needed. Calling with strong=0 and internal=1
1592	 * causes no actual reference to be released in binder_dec_node_nilocked().
1593 * If that changes, a change is needed here too.
1594 */
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001595 free_node = binder_dec_node_nilocked(node, 0, 1);
1596 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001597 if (free_node)
1598 binder_free_node(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07001599}
1600
1601static void binder_put_node(struct binder_node *node)
1602{
1603 binder_dec_node_tmpref(node);
1604}
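
/*
 * Illustrative sketch (editor-added, compiled out): the temporary
 * reference pattern implemented by the helpers above. Any lookup that
 * returns a node with an implicit tmp_ref held must be paired with
 * binder_put_node(). The function name below is hypothetical.
 */
#if 0
static void binder_tmpref_usage_sketch(struct binder_proc *proc,
				       binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return;
	/* node cannot be freed while the tmp_ref is held */
	binder_put_node(node);
}
#endif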
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001605
Todd Kjos5346bf32016-10-20 16:43:34 -07001606static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1607 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001608{
1609 struct rb_node *n = proc->refs_by_desc.rb_node;
1610 struct binder_ref *ref;
1611
1612 while (n) {
1613 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1614
Todd Kjosb0117bb2017-05-08 09:16:27 -07001615 if (desc < ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001616 n = n->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001617 } else if (desc > ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001618 n = n->rb_right;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001619 } else if (need_strong_ref && !ref->data.strong) {
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001620 binder_user_error("tried to use weak ref as strong ref\n");
1621 return NULL;
1622 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001623 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001624 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001625 }
1626 return NULL;
1627}
1628
Todd Kjosb0117bb2017-05-08 09:16:27 -07001629/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001630 * binder_get_ref_for_node_olocked() - get the ref associated with given node
Todd Kjosb0117bb2017-05-08 09:16:27 -07001631 * @proc: binder_proc that owns the ref
1632 * @node: binder_node of target
1633 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1634 *
1635 * Look up the ref for the given node and return it if it exists
1636 *
1637 * If it doesn't exist and the caller provides a newly allocated
1638 * ref, initialize the fields of the newly allocated ref and insert
1639 * into the given proc rb_trees and node refs list.
1640 *
1641 * Return: the ref for node. It is possible that another thread
1642 * allocated/initialized the ref first in which case the
1643 * returned ref would be different from the passed-in
1644 * new_ref. new_ref must be kfree'd by the caller in
1645 * this case.
1646 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001647static struct binder_ref *binder_get_ref_for_node_olocked(
1648 struct binder_proc *proc,
1649 struct binder_node *node,
1650 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001651{
Todd Kjosb0117bb2017-05-08 09:16:27 -07001652 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001653 struct rb_node **p = &proc->refs_by_node.rb_node;
1654 struct rb_node *parent = NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001655 struct binder_ref *ref;
1656 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001657
1658 while (*p) {
1659 parent = *p;
1660 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1661
1662 if (node < ref->node)
1663 p = &(*p)->rb_left;
1664 else if (node > ref->node)
1665 p = &(*p)->rb_right;
1666 else
1667 return ref;
1668 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001669 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001670 return NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001671
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001672 binder_stats_created(BINDER_STAT_REF);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001673 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001674 new_ref->proc = proc;
1675 new_ref->node = node;
1676 rb_link_node(&new_ref->rb_node_node, parent, p);
1677 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1678
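	/*
	 * Editor's note: the loop below assigns the lowest free
	 * descriptor -- 0 is reserved for the context manager node --
	 * relying on refs_by_desc being ordered by desc.
	 */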
Todd Kjosb0117bb2017-05-08 09:16:27 -07001679 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001680 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1681 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001682 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001683 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001684 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001685 }
1686
1687 p = &proc->refs_by_desc.rb_node;
1688 while (*p) {
1689 parent = *p;
1690 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1691
Todd Kjosb0117bb2017-05-08 09:16:27 -07001692 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001693 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001694 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001695 p = &(*p)->rb_right;
1696 else
1697 BUG();
1698 }
1699 rb_link_node(&new_ref->rb_node_desc, parent, p);
1700 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001701
1702 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001703 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001704
Todd Kjos4cbe5752017-05-01 17:21:51 -07001705 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1706 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001707 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001708 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001709 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001710 return new_ref;
1711}
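
/*
 * Editor's note: the new_ref contract documented above is exercised by
 * binder_inc_ref_for_node() below -- allocate outside the proc lock,
 * retry the lookup under the lock, and kfree() the spare allocation if
 * another thread raced ahead and inserted its ref first.
 */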
1712
Todd Kjos5346bf32016-10-20 16:43:34 -07001713static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001714{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001715 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001716
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001717 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301718 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001719 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301720 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001721
1722 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1723 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001724
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001725 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001726 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001727 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001728
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001729 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001730 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1731 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001732 /*
1733 * Clear ref->node unless we want the caller to free the node
1734 */
1735 if (!delete_node) {
1736 /*
1737 * The caller uses ref->node to determine
1738 * whether the node needs to be freed. Clear
1739 * it since the node is still alive.
1740 */
1741 ref->node = NULL;
1742 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001743
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001744 if (ref->death) {
1745 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301746 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001747 ref->proc->pid, ref->data.debug_id,
1748 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001749 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001750 binder_stats_deleted(BINDER_STAT_DEATH);
1751 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001752 binder_stats_deleted(BINDER_STAT_REF);
1753}
1754
Todd Kjosb0117bb2017-05-08 09:16:27 -07001755/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001756 * binder_inc_ref_olocked() - increment the ref for given handle
Todd Kjosb0117bb2017-05-08 09:16:27 -07001757 * @ref: ref to be incremented
1758 * @strong: if true, strong increment, else weak
1759 * @target_list: list to queue node work on
1760 *
Todd Kjos5346bf32016-10-20 16:43:34 -07001761 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjosb0117bb2017-05-08 09:16:27 -07001762 *
1763 * Return: 0, if successful, else errno
1764 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001765static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1766 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001767{
1768 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001769
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001770 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001771 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001772 ret = binder_inc_node(ref->node, 1, 1, target_list);
1773 if (ret)
1774 return ret;
1775 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001776 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001777 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001778 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001779 ret = binder_inc_node(ref->node, 0, 1, target_list);
1780 if (ret)
1781 return ret;
1782 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001783 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001784 }
1785 return 0;
1786}
1787
Todd Kjosb0117bb2017-05-08 09:16:27 -07001788/**
1789 * binder_dec_ref_olocked() - dec the ref for given handle
1790 * @ref: ref to be decremented
1791 * @strong: if true, strong decrement, else weak
1792 *
1793 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1794 *
Todd Kjosb0117bb2017-05-08 09:16:27 -07001795 * Return: true if ref is cleaned up and ready to be freed
1796 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001797static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001798{
1799 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001800 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301801 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001802 ref->proc->pid, ref->data.debug_id,
1803 ref->data.desc, ref->data.strong,
1804 ref->data.weak);
1805 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001806 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001807 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001808 if (ref->data.strong == 0)
1809 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001810 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001811 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301812 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001813 ref->proc->pid, ref->data.debug_id,
1814 ref->data.desc, ref->data.strong,
1815 ref->data.weak);
1816 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001817 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001818 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001819 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001820 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001821 binder_cleanup_ref_olocked(ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001822 return true;
1823 }
1824 return false;
1825}
1826
1827/**
1828 * binder_get_node_from_ref() - get the node from the given proc/desc
1829 * @proc: proc containing the ref
1830 * @desc: the handle associated with the ref
1831 * @need_strong_ref: if true, only return node if ref is strong
1832 * @rdata: the id/refcount data for the ref
1833 *
1834 * Given a proc and ref handle, return the associated binder_node
1835 *
1836 * Return: a binder_node, or NULL if not found (or if the ref is weak when a strong ref is required)
1837 */
1838static struct binder_node *binder_get_node_from_ref(
1839 struct binder_proc *proc,
1840 u32 desc, bool need_strong_ref,
1841 struct binder_ref_data *rdata)
1842{
1843 struct binder_node *node;
1844 struct binder_ref *ref;
1845
Todd Kjos5346bf32016-10-20 16:43:34 -07001846 binder_proc_lock(proc);
1847 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001848 if (!ref)
1849 goto err_no_ref;
1850 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001851 /*
1852 * Take an implicit reference on the node to ensure
1853 * it stays alive until the call to binder_put_node()
1854 */
1855 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001856 if (rdata)
1857 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001858 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001859
1860 return node;
1861
1862err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001863 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001864 return NULL;
1865}
1866
1867/**
1868 * binder_free_ref() - free the binder_ref
1869 * @ref: ref to free
1870 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001871 * Free the binder_ref. Free the binder_node indicated by ref->node
1872 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001873 */
1874static void binder_free_ref(struct binder_ref *ref)
1875{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001876 if (ref->node)
1877 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001878 kfree(ref->death);
1879 kfree(ref);
1880}
1881
1882/**
1883 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1884 * @proc: proc containing the ref
1885 * @desc: the handle associated with the ref
1886 * @increment: true=inc reference, false=dec reference
1887 * @strong: true=strong reference, false=weak reference
1888 * @rdata: the id/refcount data for the ref
1889 *
1890 * Given a proc and ref handle, increment or decrement the ref
1891 * according to "increment" arg.
1892 *
1893 * Return: 0 if successful, else errno
1894 */
1895static int binder_update_ref_for_handle(struct binder_proc *proc,
1896 uint32_t desc, bool increment, bool strong,
1897 struct binder_ref_data *rdata)
1898{
1899 int ret = 0;
1900 struct binder_ref *ref;
1901 bool delete_ref = false;
1902
Todd Kjos5346bf32016-10-20 16:43:34 -07001903 binder_proc_lock(proc);
1904 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001905 if (!ref) {
1906 ret = -EINVAL;
1907 goto err_no_ref;
1908 }
1909 if (increment)
Todd Kjos5346bf32016-10-20 16:43:34 -07001910 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001911 else
Todd Kjos5346bf32016-10-20 16:43:34 -07001912 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001913
1914 if (rdata)
1915 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001916 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001917
1918 if (delete_ref)
1919 binder_free_ref(ref);
1920 return ret;
1921
1922err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001923 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001924 return ret;
1925}
1926
1927/**
1928 * binder_dec_ref_for_handle() - dec the ref for given handle
1929 * @proc: proc containing the ref
1930 * @desc: the handle associated with the ref
1931 * @strong: true=strong reference, false=weak reference
1932 * @rdata: the id/refcount data for the ref
1933 *
1934 * Just calls binder_update_ref_for_handle() to decrement the ref.
1935 *
1936 * Return: 0 if successful, else errno
1937 */
1938static int binder_dec_ref_for_handle(struct binder_proc *proc,
1939 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1940{
1941 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1942}
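
/*
 * Illustrative sketch (editor-added, compiled out): a balanced
 * strong-ref increment/decrement on a handle via the helpers above.
 * The function name and this use of @rdata are hypothetical.
 */
#if 0
static int binder_handle_ref_sketch(struct binder_proc *proc, u32 desc)
{
	struct binder_ref_data rdata;
	int ret;

	/* increment == true, strong == true */
	ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
	if (ret)
		return ret;
	/* ... the strong reference keeps the target node alive ... */
	return binder_dec_ref_for_handle(proc, desc, true, &rdata);
}
#endif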
1943
1944
1945/**
1946 * binder_inc_ref_for_node() - increment the ref for given proc/node
1947 * @proc: proc containing the ref
1948 * @node: target node
1949 * @strong: true=strong reference, false=weak reference
1950 * @target_list: worklist to use if node is incremented
1951 * @rdata: the id/refcount data for the ref
1952 *
1953 * Given a proc and node, increment the ref. Create the ref if it
1954 * doesn't already exist
1955 *
1956 * Return: 0 if successful, else errno
1957 */
1958static int binder_inc_ref_for_node(struct binder_proc *proc,
1959 struct binder_node *node,
1960 bool strong,
1961 struct list_head *target_list,
1962 struct binder_ref_data *rdata)
1963{
1964 struct binder_ref *ref;
1965 struct binder_ref *new_ref = NULL;
1966 int ret = 0;
1967
Todd Kjos5346bf32016-10-20 16:43:34 -07001968 binder_proc_lock(proc);
1969 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001970 if (!ref) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001971 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001972 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1973 if (!new_ref)
1974 return -ENOMEM;
Todd Kjos5346bf32016-10-20 16:43:34 -07001975 binder_proc_lock(proc);
1976 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001977 }
Todd Kjos5346bf32016-10-20 16:43:34 -07001978 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001979 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001980 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001981 if (new_ref && ref != new_ref)
1982 /*
1983 * Another thread created the ref first so
1984 * free the one we allocated
1985 */
1986 kfree(new_ref);
1987 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001988}
1989
Martijn Coenen995a36e2017-06-02 13:36:52 -07001990static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1991 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001992{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001993 BUG_ON(!target_thread);
Martijn Coenened323352017-07-27 23:52:24 +02001994 assert_spin_locked(&target_thread->proc->inner_lock);
Todd Kjos21ef40a2017-03-30 18:02:13 -07001995 BUG_ON(target_thread->transaction_stack != t);
1996 BUG_ON(target_thread->transaction_stack->from != target_thread);
1997 target_thread->transaction_stack =
1998 target_thread->transaction_stack->from_parent;
1999 t->from = NULL;
2000}
2001
Todd Kjos2f993e22017-05-12 14:42:55 -07002002/**
2003 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2004 * @thread: thread to decrement
2005 *
2006 * A thread needs to be kept alive while being used to create or
2007 * handle a transaction. binder_get_txn_from() is used to safely
2008 * extract t->from from a binder_transaction and keep the thread
2009 * indicated by t->from from being freed. When done with that
2010 * binder_thread, this function is called to decrement the
2011 * tmp_ref and free if appropriate (thread has been released
2012 * and no transaction being processed by the driver)
2013 */
2014static void binder_thread_dec_tmpref(struct binder_thread *thread)
2015{
2016 /*
2017	 * atomic protects concurrent updates to the counter; the inner
2018	 * lock taken below keeps the is_dead and final tmp_ref checks consistent
Todd Kjos2f993e22017-05-12 14:42:55 -07002019 */
Todd Kjosb4827902017-05-25 15:52:17 -07002020 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002021 atomic_dec(&thread->tmp_ref);
2022 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07002023 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002024 binder_free_thread(thread);
2025 return;
2026 }
Todd Kjosb4827902017-05-25 15:52:17 -07002027 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002028}
2029
2030/**
2031 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2032 * @proc: proc to decrement
2033 *
2034 * A binder_proc needs to be kept alive while being used to create or
2035 * handle a transaction. proc->tmp_ref is incremented when
2036 * creating a new transaction or the binder_proc is currently in-use
2037 * by threads that are being released. When done with the binder_proc,
2038 * this function is called to decrement the counter and free the
2039 * proc if appropriate (proc has been released, all threads have
2040 * been released and not currenly in-use to process a transaction).
2041 */
2042static void binder_proc_dec_tmpref(struct binder_proc *proc)
2043{
Todd Kjosb4827902017-05-25 15:52:17 -07002044 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002045 proc->tmp_ref--;
2046 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2047 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07002048 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002049 binder_free_proc(proc);
2050 return;
2051 }
Todd Kjosb4827902017-05-25 15:52:17 -07002052 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002053}
2054
2055/**
2056 * binder_get_txn_from() - safely extract the "from" thread in transaction
2057 * @t: binder transaction for t->from
2058 *
2059 * Atomically return the "from" thread and increment the tmp_ref
2060 * count for the thread to ensure it stays alive until
2061 * binder_thread_dec_tmpref() is called.
2062 *
2063 * Return: the value of t->from
2064 */
2065static struct binder_thread *binder_get_txn_from(
2066 struct binder_transaction *t)
2067{
2068 struct binder_thread *from;
2069
2070 spin_lock(&t->lock);
2071 from = t->from;
2072 if (from)
2073 atomic_inc(&from->tmp_ref);
2074 spin_unlock(&t->lock);
2075 return from;
2076}
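
/*
 * Illustrative sketch (editor-added, compiled out): every
 * binder_get_txn_from() must be paired with a later
 * binder_thread_dec_tmpref(). The function name is hypothetical.
 */
#if 0
static void binder_txn_from_sketch(struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from(t);

	if (!from)
		return;
	/* from->tmp_ref keeps the thread alive in this window */
	binder_thread_dec_tmpref(from);
}
#endif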
2077
Martijn Coenen995a36e2017-06-02 13:36:52 -07002078/**
2079 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2080 * @t: binder transaction for t->from
2081 *
2082 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2083 * to guarantee that the thread cannot be released while operating on it.
2084 * The caller must call binder_inner_proc_unlock() to release the inner lock
2085 * as well as call binder_thread_dec_tmpref() to release the reference.
2086 *
2087 * Return: the value of t->from
2088 */
2089static struct binder_thread *binder_get_txn_from_and_acq_inner(
2090 struct binder_transaction *t)
2091{
2092 struct binder_thread *from;
2093
2094 from = binder_get_txn_from(t);
2095 if (!from)
2096 return NULL;
2097 binder_inner_proc_lock(from->proc);
2098 if (t->from) {
2099 BUG_ON(from != t->from);
2100 return from;
2101 }
2102 binder_inner_proc_unlock(from->proc);
2103 binder_thread_dec_tmpref(from);
2104 return NULL;
2105}
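
/*
 * Illustrative sketch (editor-added, compiled out) of the release
 * sequence required by the comment above: drop the inner lock first,
 * then the thread reference. The function name is hypothetical.
 */
#if 0
static void binder_txn_from_locked_sketch(struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);

	if (!from)
		return;
	/* from->proc->inner_lock held: thread cannot be released here */
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}
#endif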
2106
Todd Kjos21ef40a2017-03-30 18:02:13 -07002107static void binder_free_transaction(struct binder_transaction *t)
2108{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002109 if (t->buffer)
2110 t->buffer->transaction = NULL;
2111 kfree(t);
2112 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2113}
2114
2115static void binder_send_failed_reply(struct binder_transaction *t,
2116 uint32_t error_code)
2117{
2118 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002119 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09002120
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002121 BUG_ON(t->flags & TF_ONE_WAY);
2122 while (1) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002123 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002124 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002125 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2126 "send failed reply for transaction %d to %d:%d\n",
2127 t->debug_id,
2128 target_thread->proc->pid,
2129 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002130
Martijn Coenen995a36e2017-06-02 13:36:52 -07002131 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07002132 if (target_thread->reply_error.cmd == BR_OK) {
2133 target_thread->reply_error.cmd = error_code;
Martijn Coenen1af61802017-10-19 15:04:46 +02002134 binder_enqueue_thread_work_ilocked(
2135 target_thread,
2136 &target_thread->reply_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002137 wake_up_interruptible(&target_thread->wait);
2138 } else {
Todd Kjos858b8da2017-04-21 17:35:12 -07002139 WARN(1, "Unexpected reply error: %u\n",
2140 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002141 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002142 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002143 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07002144 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002145 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002146 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002147 next = t->from_parent;
2148
2149 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2150 "send failed reply for transaction %d, target dead\n",
2151 t->debug_id);
2152
Todd Kjos21ef40a2017-03-30 18:02:13 -07002153 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002154 if (next == NULL) {
2155 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2156 "reply failed, no target thread at root\n");
2157 return;
2158 }
2159 t = next;
2160 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2161 "reply failed, no target thread -- retry %d\n",
2162 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002163 }
2164}
2165
Martijn Coenen00c80372016-07-13 12:06:49 +02002166/**
Martijn Coenen3217ccc2017-08-24 15:23:36 +02002167 * binder_cleanup_transaction() - cleans up undelivered transaction
2168 * @t: transaction that needs to be cleaned up
2169 * @reason: reason the transaction wasn't delivered
2170 * @error_code: error to return to caller (if synchronous call)
2171 */
2172static void binder_cleanup_transaction(struct binder_transaction *t,
2173 const char *reason,
2174 uint32_t error_code)
2175{
2176 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2177 binder_send_failed_reply(t, error_code);
2178 } else {
2179 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2180 "undelivered transaction %d, %s\n",
2181 t->debug_id, reason);
2182 binder_free_transaction(t);
2183 }
2184}
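
/*
 * Editor's note (illustrative, assumed context): a typical call when
 * dropping work for a dying target might look like
 *
 *	binder_cleanup_transaction(t, "process died", BR_DEAD_REPLY);
 *
 * so a synchronous caller is unblocked with an error while a oneway
 * transaction is simply freed.
 */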
2185
2186/**
Martijn Coenen00c80372016-07-13 12:06:49 +02002187 * binder_validate_object() - checks for a valid metadata object in a buffer.
2188 * @buffer: binder_buffer that we're parsing.
2189 * @offset: offset in the buffer at which to validate an object.
2190 *
2191 * Return: If there's a valid metadata object at @offset in @buffer, the
2192 * size of that object. Otherwise, it returns zero.
2193 */
2194static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2195{
2196 /* Check if we can read a header first */
2197 struct binder_object_header *hdr;
2198 size_t object_size = 0;
2199
2200 if (offset > buffer->data_size - sizeof(*hdr) ||
2201 buffer->data_size < sizeof(*hdr) ||
2202 !IS_ALIGNED(offset, sizeof(u32)))
2203 return 0;
2204
2205 /* Ok, now see if we can read a complete object. */
2206 hdr = (struct binder_object_header *)(buffer->data + offset);
2207 switch (hdr->type) {
2208 case BINDER_TYPE_BINDER:
2209 case BINDER_TYPE_WEAK_BINDER:
2210 case BINDER_TYPE_HANDLE:
2211 case BINDER_TYPE_WEAK_HANDLE:
2212 object_size = sizeof(struct flat_binder_object);
2213 break;
2214 case BINDER_TYPE_FD:
2215 object_size = sizeof(struct binder_fd_object);
2216 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002217 case BINDER_TYPE_PTR:
2218 object_size = sizeof(struct binder_buffer_object);
2219 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002220 case BINDER_TYPE_FDA:
2221 object_size = sizeof(struct binder_fd_array_object);
2222 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02002223 default:
2224 return 0;
2225 }
2226 if (offset <= buffer->data_size - object_size &&
2227 buffer->data_size >= object_size)
2228 return object_size;
2229 else
2230 return 0;
2231}
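
/*
 * Editor's note (illustrative numbers, assuming a 64-bit build without
 * BINDER_IPC_32BIT): with buffer->data_size == 64, a flat_binder_object
 * is 24 bytes, so offset 40 validates (40 <= 64 - 24) while offset 44
 * returns 0 even though the 4-byte header itself would still fit.
 */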
2232
Martijn Coenen5a6da532016-09-30 14:10:07 +02002233/**
2234 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2235 * @b: binder_buffer containing the object
2236 * @index: index in offset array at which the binder_buffer_object is
2237 * located
2238 * @start: points to the start of the offset array
2239 * @num_valid: the number of valid offsets in the offset array
2240 *
2241 * Return: If @index is within the valid range of the offset array
2242 * described by @start and @num_valid, and if there's a valid
2243 * binder_buffer_object at the offset found in index @index
2244 * of the offset array, that object is returned. Otherwise,
2245 * %NULL is returned.
2246 * Note that the offset found in index @index itself is not
2247 * verified; this function assumes that @num_valid elements
2248 * from @start were previously verified to have valid offsets.
2249 */
2250static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2251 binder_size_t index,
2252 binder_size_t *start,
2253 binder_size_t num_valid)
2254{
2255 struct binder_buffer_object *buffer_obj;
2256 binder_size_t *offp;
2257
2258 if (index >= num_valid)
2259 return NULL;
2260
2261 offp = start + index;
2262 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2263 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2264 return NULL;
2265
2266 return buffer_obj;
2267}
2268
2269/**
2270 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2271 * @b: transaction buffer
2272 * @objects_start:	start of objects buffer
2273 * @buffer: binder_buffer_object in which to fix up
2274 * @offset: start offset in @buffer to fix up
2275 * @last_obj: last binder_buffer_object that we fixed up in
2276 * @last_min_offset: minimum fixup offset in @last_obj
2277 *
2278 * Return: %true if a fixup in buffer @buffer at offset @offset is
2279 * allowed.
2280 *
2281 * For safety reasons, we only allow fixups inside a buffer to happen
2282 * at increasing offsets; additionally, we only allow fixup on the last
2283 * buffer object that was verified, or one of its parents.
2284 *
2285 * Example of what is allowed:
2286 *
2287 * A
2288 *   B (parent = A, offset = 0)
2289 *   C (parent = A, offset = 16)
2290 *     D (parent = C, offset = 0)
2291 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2292 *
2293 * Examples of what is not allowed:
2294 *
2295 * Decreasing offsets within the same parent:
2296 * A
2297 *   C (parent = A, offset = 16)
2298 *   B (parent = A, offset = 0) // decreasing offset within A
2299 *
2300 * Referring to a parent that wasn't the last object or any of its parents:
2301 * A
2302 *   B (parent = A, offset = 0)
2303 *   C (parent = A, offset = 0)
2304 *   C (parent = A, offset = 16)
2305 *   D (parent = B, offset = 0) // B is not A or any of A's parents
2306 */
2307static bool binder_validate_fixup(struct binder_buffer *b,
2308 binder_size_t *objects_start,
2309 struct binder_buffer_object *buffer,
2310 binder_size_t fixup_offset,
2311 struct binder_buffer_object *last_obj,
2312 binder_size_t last_min_offset)
2313{
2314 if (!last_obj) {
2315		/* No object verified yet, so there is nothing to fix up in */
2316 return false;
2317 }
2318
2319 while (last_obj != buffer) {
2320 /*
2321 * Safe to retrieve the parent of last_obj, since it
2322 * was already previously verified by the driver.
2323 */
2324 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2325 return false;
2326 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2327 last_obj = (struct binder_buffer_object *)
2328 (b->data + *(objects_start + last_obj->parent));
2329 }
2330 return (fixup_offset >= last_min_offset);
2331}
2332
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002333static void binder_transaction_buffer_release(struct binder_proc *proc,
2334 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002335 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002336{
Martijn Coenen5a6da532016-09-30 14:10:07 +02002337 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002338 int debug_id = buffer->debug_id;
2339
2340 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302341 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002342 proc->pid, buffer->debug_id,
2343 buffer->data_size, buffer->offsets_size, failed_at);
2344
2345 if (buffer->target_node)
2346 binder_dec_node(buffer->target_node, 1, 0);
2347
Martijn Coenen5a6da532016-09-30 14:10:07 +02002348 off_start = (binder_size_t *)(buffer->data +
2349 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002350 if (failed_at)
2351 off_end = failed_at;
2352 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02002353 off_end = (void *)off_start + buffer->offsets_size;
2354 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002355 struct binder_object_header *hdr;
2356 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002357
Martijn Coenen00c80372016-07-13 12:06:49 +02002358 if (object_size == 0) {
2359 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002360 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002361 continue;
2362 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002363 hdr = (struct binder_object_header *)(buffer->data + *offp);
2364 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002365 case BINDER_TYPE_BINDER:
2366 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002367 struct flat_binder_object *fp;
2368 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002369
Martijn Coenen00c80372016-07-13 12:06:49 +02002370 fp = to_flat_binder_object(hdr);
2371 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002372 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002373 pr_err("transaction release %d bad node %016llx\n",
2374 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002375 break;
2376 }
2377 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002378 " node %d u%016llx\n",
2379 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002380 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2381 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002382 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002383 } break;
2384 case BINDER_TYPE_HANDLE:
2385 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002386 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002387 struct binder_ref_data rdata;
2388 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002389
Martijn Coenen00c80372016-07-13 12:06:49 +02002390 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002391 ret = binder_dec_ref_for_handle(proc, fp->handle,
2392 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2393
2394 if (ret) {
2395 pr_err("transaction release %d bad handle %d, ret = %d\n",
2396 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002397 break;
2398 }
2399 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002400 " ref %d desc %d\n",
2401 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002402 } break;
2403
Martijn Coenen00c80372016-07-13 12:06:49 +02002404 case BINDER_TYPE_FD: {
2405 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2406
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002407 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002408 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002409 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002410 task_close_fd(proc, fp->fd);
2411 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002412 case BINDER_TYPE_PTR:
2413 /*
2414 * Nothing to do here, this will get cleaned up when the
2415 * transaction buffer gets freed
2416 */
2417 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002418 case BINDER_TYPE_FDA: {
2419 struct binder_fd_array_object *fda;
2420 struct binder_buffer_object *parent;
2421 uintptr_t parent_buffer;
2422 u32 *fd_array;
2423 size_t fd_index;
2424 binder_size_t fd_buf_size;
2425
2426 fda = to_binder_fd_array_object(hdr);
2427 parent = binder_validate_ptr(buffer, fda->parent,
2428 off_start,
2429 offp - off_start);
2430 if (!parent) {
2431 pr_err("transaction release %d bad parent offset",
2432 debug_id);
2433 continue;
2434 }
2435 /*
2436 * Since the parent was already fixed up, convert it
2437 * back to kernel address space to access it
2438 */
2439 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002440 binder_alloc_get_user_buffer_offset(
2441 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002442
2443 fd_buf_size = sizeof(u32) * fda->num_fds;
2444 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2445 pr_err("transaction release %d invalid number of fds (%lld)\n",
2446 debug_id, (u64)fda->num_fds);
2447 continue;
2448 }
2449 if (fd_buf_size > parent->length ||
2450 fda->parent_offset > parent->length - fd_buf_size) {
2451 /* No space for all file descriptors here. */
2452 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2453 debug_id, (u64)fda->num_fds);
2454 continue;
2455 }
2456 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2457 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2458 task_close_fd(proc, fd_array[fd_index]);
2459 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002460 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002461 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002462 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002463 break;
2464 }
2465 }
2466}
2467
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002468static int binder_translate_binder(struct flat_binder_object *fp,
2469 struct binder_transaction *t,
2470 struct binder_thread *thread)
2471{
2472 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002473 struct binder_proc *proc = thread->proc;
2474 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002475 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002476 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002477
2478 node = binder_get_node(proc, fp->binder);
2479 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002480 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002481 if (!node)
2482 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002483 }
2484 if (fp->cookie != node->cookie) {
2485 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2486 proc->pid, thread->pid, (u64)fp->binder,
2487 node->debug_id, (u64)fp->cookie,
2488 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002489 ret = -EINVAL;
2490 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002491 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002492 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2493 ret = -EPERM;
2494 goto done;
2495 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002496
Todd Kjosb0117bb2017-05-08 09:16:27 -07002497 ret = binder_inc_ref_for_node(target_proc, node,
2498 fp->hdr.type == BINDER_TYPE_BINDER,
2499 &thread->todo, &rdata);
2500 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002501 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002502
2503 if (fp->hdr.type == BINDER_TYPE_BINDER)
2504 fp->hdr.type = BINDER_TYPE_HANDLE;
2505 else
2506 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2507 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002508 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002509 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002510
Todd Kjosb0117bb2017-05-08 09:16:27 -07002511 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002512 binder_debug(BINDER_DEBUG_TRANSACTION,
2513 " node %d u%016llx -> ref %d desc %d\n",
2514 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002515 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002516done:
2517 binder_put_node(node);
2518 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002519}
2520
2521static int binder_translate_handle(struct flat_binder_object *fp,
2522 struct binder_transaction *t,
2523 struct binder_thread *thread)
2524{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002525 struct binder_proc *proc = thread->proc;
2526 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002527 struct binder_node *node;
2528 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002529 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002530
Todd Kjosb0117bb2017-05-08 09:16:27 -07002531 node = binder_get_node_from_ref(proc, fp->handle,
2532 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2533 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002534 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2535 proc->pid, thread->pid, fp->handle);
2536 return -EINVAL;
2537 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002538 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2539 ret = -EPERM;
2540 goto done;
2541 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002542
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002543 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002544 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002545 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2546 fp->hdr.type = BINDER_TYPE_BINDER;
2547 else
2548 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002549 fp->binder = node->ptr;
2550 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002551 if (node->proc)
2552 binder_inner_proc_lock(node->proc);
2553 binder_inc_node_nilocked(node,
2554 fp->hdr.type == BINDER_TYPE_BINDER,
2555 0, NULL);
2556 if (node->proc)
2557 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002558 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002559 binder_debug(BINDER_DEBUG_TRANSACTION,
2560 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002561 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2562 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002563 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002564 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002565 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002566
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002567 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002568 ret = binder_inc_ref_for_node(target_proc, node,
2569 fp->hdr.type == BINDER_TYPE_HANDLE,
2570 NULL, &dest_rdata);
2571 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002572 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002573
2574 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002575 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002576 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002577 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2578 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002579 binder_debug(BINDER_DEBUG_TRANSACTION,
2580 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002581 src_rdata.debug_id, src_rdata.desc,
2582 dest_rdata.debug_id, dest_rdata.desc,
2583 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002584 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002585done:
2586 binder_put_node(node);
2587 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002588}
2589
2590static int binder_translate_fd(int fd,
2591 struct binder_transaction *t,
2592 struct binder_thread *thread,
2593 struct binder_transaction *in_reply_to)
2594{
2595 struct binder_proc *proc = thread->proc;
2596 struct binder_proc *target_proc = t->to_proc;
2597 int target_fd;
2598 struct file *file;
2599 int ret;
2600 bool target_allows_fd;
2601
2602 if (in_reply_to)
2603 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2604 else
2605 target_allows_fd = t->buffer->target_node->accept_fds;
2606 if (!target_allows_fd) {
2607 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2608 proc->pid, thread->pid,
2609 in_reply_to ? "reply" : "transaction",
2610 fd);
2611 ret = -EPERM;
2612 goto err_fd_not_accepted;
2613 }
2614
2615 file = fget(fd);
2616 if (!file) {
2617 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2618 proc->pid, thread->pid, fd);
2619 ret = -EBADF;
2620 goto err_fget;
2621 }
2622 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2623 if (ret < 0) {
2624 ret = -EPERM;
2625 goto err_security;
2626 }
2627
2628 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2629 if (target_fd < 0) {
2630 ret = -ENOMEM;
2631 goto err_get_unused_fd;
2632 }
2633 task_fd_install(target_proc, target_fd, file);
2634 trace_binder_transaction_fd(t, fd, target_fd);
2635 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2636 fd, target_fd);
2637
2638 return target_fd;
2639
2640err_get_unused_fd:
2641err_security:
2642 fput(file);
2643err_fget:
2644err_fd_not_accepted:
2645 return ret;
2646}
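
/*
 * Illustrative sender-side counterpart of binder_translate_fd() (a
 * sketch, not driver code; the helper name is hypothetical). The sender
 * embeds an open fd in the payload as a binder_fd_object; on delivery
 * the driver installs the file in the target, as above, and patches
 * fp->fd in place.
 */
static void fill_fd_object(struct binder_fd_object *fp, int open_fd)
{
	fp->hdr.type = BINDER_TYPE_FD;
	fp->pad_flags = 0;
	fp->fd = open_fd;	/* rewritten to the target's new fd on delivery */
}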
2647
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002648static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2649 struct binder_buffer_object *parent,
2650 struct binder_transaction *t,
2651 struct binder_thread *thread,
2652 struct binder_transaction *in_reply_to)
2653{
2654 binder_size_t fdi, fd_buf_size, num_installed_fds;
2655 int target_fd;
2656 uintptr_t parent_buffer;
2657 u32 *fd_array;
2658 struct binder_proc *proc = thread->proc;
2659 struct binder_proc *target_proc = t->to_proc;
2660
2661 fd_buf_size = sizeof(u32) * fda->num_fds;
2662 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2663 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2664 proc->pid, thread->pid, (u64)fda->num_fds);
2665 return -EINVAL;
2666 }
2667 if (fd_buf_size > parent->length ||
2668 fda->parent_offset > parent->length - fd_buf_size) {
2669 /* No space for all file descriptors here. */
2670 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2671 proc->pid, thread->pid, (u64)fda->num_fds);
2672 return -EINVAL;
2673 }
2674 /*
2675 * Since the parent was already fixed up, convert it
2676 * back to the kernel address space to access it
2677 */
Todd Kjosd325d372016-10-10 10:40:53 -07002678 parent_buffer = parent->buffer -
2679 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002680 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2681 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2682 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2683 proc->pid, thread->pid);
2684 return -EINVAL;
2685 }
2686 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2687 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2688 in_reply_to);
2689 if (target_fd < 0)
2690 goto err_translate_fd_failed;
2691 fd_array[fdi] = target_fd;
2692 }
2693 return 0;
2694
2695err_translate_fd_failed:
2696 /*
2697	 * Failed to allocate an fd or hit a security error; free the
2698	 * fds installed so far.
2699 */
2700 num_installed_fds = fdi;
2701 for (fdi = 0; fdi < num_installed_fds; fdi++)
2702 task_close_fd(target_proc, fd_array[fdi]);
2703 return target_fd;
2704}
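
/*
 * Illustrative layout for the fd-array case above (a sketch, not driver
 * code; example_parcel and the helper are hypothetical, and offsetof()
 * from <stddef.h> is assumed). The fds live inside a parent
 * BINDER_TYPE_PTR buffer; the binder_fd_array_object only records where
 * in that parent to find (and patch) them.
 */
struct example_parcel {
	__u32 count;
	__u32 fds[2];		/* patched in place by the loop above */
};

static void fill_fd_array_object(struct binder_fd_array_object *fda,
				 binder_size_t parent_index)
{
	fda->hdr.type = BINDER_TYPE_FDA;
	fda->num_fds = 2;
	fda->parent = parent_index;	/* offset-array index of the parent */
	fda->parent_offset = offsetof(struct example_parcel, fds);
}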
2705
Martijn Coenen5a6da532016-09-30 14:10:07 +02002706static int binder_fixup_parent(struct binder_transaction *t,
2707 struct binder_thread *thread,
2708 struct binder_buffer_object *bp,
2709 binder_size_t *off_start,
2710 binder_size_t num_valid,
2711 struct binder_buffer_object *last_fixup_obj,
2712 binder_size_t last_fixup_min_off)
2713{
2714 struct binder_buffer_object *parent;
2715 u8 *parent_buffer;
2716 struct binder_buffer *b = t->buffer;
2717 struct binder_proc *proc = thread->proc;
2718 struct binder_proc *target_proc = t->to_proc;
2719
2720 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2721 return 0;
2722
2723 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2724 if (!parent) {
2725 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2726 proc->pid, thread->pid);
2727 return -EINVAL;
2728 }
2729
2730 if (!binder_validate_fixup(b, off_start,
2731 parent, bp->parent_offset,
2732 last_fixup_obj,
2733 last_fixup_min_off)) {
2734 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2735 proc->pid, thread->pid);
2736 return -EINVAL;
2737 }
2738
2739 if (parent->length < sizeof(binder_uintptr_t) ||
2740 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2741 /* No space for a pointer here! */
2742 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2743 proc->pid, thread->pid);
2744 return -EINVAL;
2745 }
2746 parent_buffer = (u8 *)(parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002747 binder_alloc_get_user_buffer_offset(
2748 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002749 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2750
2751 return 0;
2752}
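
/*
 * Illustrative use of the fixup above (a sketch, not driver code;
 * example_node and the helper are hypothetical). A BINDER_TYPE_PTR
 * object with BINDER_BUFFER_FLAG_HAS_PARENT asks the driver to rewrite
 * the pointer embedded in the already-copied parent buffer so it points
 * at the child's location in the target's address space.
 */
struct example_node {
	__u32 tag;
	binder_uintptr_t child;	/* rewritten by binder_fixup_parent() */
};

static void fill_child_object(struct binder_buffer_object *bp,
			      void *child, size_t child_len,
			      binder_size_t parent_index)
{
	bp->hdr.type = BINDER_TYPE_PTR;
	bp->flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	bp->buffer = (binder_uintptr_t)(uintptr_t)child;
	bp->length = child_len;
	bp->parent = parent_index;
	bp->parent_offset = offsetof(struct example_node, child);
}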
2753
Martijn Coenen053be422017-06-06 15:17:46 -07002754/**
2755 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2756 * @t: transaction to send
2757 * @proc: process to send the transaction to
2758 * @thread: thread in @proc to send the transaction to (may be NULL)
2759 *
2760 * This function queues a transaction to the specified process. It will try
2761 * to find a thread in the target process to handle the transaction and
2762 * wake it up. If no thread is found, the work is queued to the proc
2763 * waitqueue.
2764 *
2765 * If the @thread parameter is not NULL, the transaction is always queued
2766 * to the waitlist of that specific thread.
2767 *
2768 * Return: true if the transaction was successfully queued
2769 * false if the target process or thread is dead
2770 */
2771static bool binder_proc_transaction(struct binder_transaction *t,
2772 struct binder_proc *proc,
2773 struct binder_thread *thread)
2774{
Martijn Coenen053be422017-06-06 15:17:46 -07002775 struct binder_node *node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002776 struct binder_priority node_prio;
Martijn Coenen053be422017-06-06 15:17:46 -07002777 bool oneway = !!(t->flags & TF_ONE_WAY);
Martijn Coenen1af61802017-10-19 15:04:46 +02002778 bool pending_async = false;
Martijn Coenen053be422017-06-06 15:17:46 -07002779
2780 BUG_ON(!node);
2781 binder_node_lock(node);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002782 node_prio.prio = node->min_priority;
2783 node_prio.sched_policy = node->sched_policy;
2784
Martijn Coenen053be422017-06-06 15:17:46 -07002785 if (oneway) {
2786 BUG_ON(thread);
2787 if (node->has_async_transaction) {
Martijn Coenen1af61802017-10-19 15:04:46 +02002788 pending_async = true;
Martijn Coenen053be422017-06-06 15:17:46 -07002789 } else {
2790 node->has_async_transaction = 1;
2791 }
2792 }
2793
2794 binder_inner_proc_lock(proc);
2795
2796 if (proc->is_dead || (thread && thread->is_dead)) {
2797 binder_inner_proc_unlock(proc);
2798 binder_node_unlock(node);
2799 return false;
2800 }
2801
Martijn Coenen1af61802017-10-19 15:04:46 +02002802 if (!thread && !pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002803 thread = binder_select_thread_ilocked(proc);
2804
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002805 if (thread) {
Martijn Coenenc46810c2017-06-23 10:13:43 -07002806 binder_transaction_priority(thread->task, t, node_prio,
2807 node->inherit_rt);
Martijn Coenen1af61802017-10-19 15:04:46 +02002808 binder_enqueue_thread_work_ilocked(thread, &t->work);
2809 } else if (!pending_async) {
2810 binder_enqueue_work_ilocked(&t->work, &proc->todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002811 } else {
Martijn Coenen1af61802017-10-19 15:04:46 +02002812 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07002813 }
Martijn Coenen053be422017-06-06 15:17:46 -07002814
Martijn Coenen1af61802017-10-19 15:04:46 +02002815 if (!pending_async)
Martijn Coenen053be422017-06-06 15:17:46 -07002816 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2817
2818 binder_inner_proc_unlock(proc);
2819 binder_node_unlock(node);
2820
2821 return true;
2822}
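
/*
 * Summary of the queueing decisions above (illustrative):
 *
 *   sync,   explicit @thread           -> thread todo list
 *   sync,   no @thread                 -> waiting thread if any, else proc->todo
 *   oneway, node idle                  -> waiting thread if any, else proc->todo
 *   oneway, async txn already pending  -> node->async_todo, no wakeup
 */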
2823
Todd Kjos291d9682017-09-25 08:55:09 -07002824/**
2825 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2826 * @node: struct binder_node for which to get refs
2827 * @procp: returns @node->proc if valid
2828 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2829 *
2830 * User-space normally keeps the node alive when creating a transaction
2831 * since it has a reference to the target. The local strong ref keeps it
2832 * alive if the sending process dies before the target process processes
2833 * the transaction. If the source process is malicious or has a reference
2834 * counting bug, relying on the local strong ref can fail.
2835 *
2836 * Since user-space can cause the local strong ref to go away, we also take
2837 * a tmpref on the node to ensure it survives while we are constructing
2838 * the transaction. We also need a tmpref on the proc while we are
2839 * constructing the transaction, so we take that here as well.
2840 *
2841 * Return: The target_node with refs taken, or NULL if @node->proc is
2842 * NULL (i.e. the target proc has died). Also sets @procp if valid;
2843 * when @node->proc is NULL, @error is set to BR_DEAD_REPLY.
2844 */
2845static struct binder_node *binder_get_node_refs_for_txn(
2846 struct binder_node *node,
2847 struct binder_proc **procp,
2848 uint32_t *error)
2849{
2850 struct binder_node *target_node = NULL;
2851
2852 binder_node_inner_lock(node);
2853 if (node->proc) {
2854 target_node = node;
2855 binder_inc_node_nilocked(node, 1, 0, NULL);
2856 binder_inc_node_tmpref_ilocked(node);
2857 node->proc->tmp_ref++;
2858 *procp = node->proc;
2859 } else
2860 *error = BR_DEAD_REPLY;
2861 binder_node_inner_unlock(node);
2862
2863 return target_node;
2864}
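
/*
 * Pairing note for the refs taken above: binder_transaction() drops the
 * node and proc tmprefs via binder_dec_node_tmpref() and
 * binder_proc_dec_tmpref() once the transaction is queued, while the
 * strong increment lives on through t->buffer->target_node (and is
 * released with binder_dec_node() on the error paths).
 */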
2865
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002866static void binder_transaction(struct binder_proc *proc,
2867 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002868 struct binder_transaction_data *tr, int reply,
2869 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002870{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002871 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002872 struct binder_transaction *t;
2873 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002874 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002875 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002876 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002877 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002878 struct binder_thread *target_thread = NULL;
2879 struct binder_node *target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002880 struct binder_transaction *in_reply_to = NULL;
2881 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002882 uint32_t return_error = 0;
2883 uint32_t return_error_param = 0;
2884 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002885 struct binder_buffer_object *last_fixup_obj = NULL;
2886 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002887 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002888 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002889
2890 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002891 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002892 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2893 e->from_proc = proc->pid;
2894 e->from_thread = thread->pid;
2895 e->target_handle = tr->target.handle;
2896 e->data_size = tr->data_size;
2897 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002898 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002899
2900 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002901 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002902 in_reply_to = thread->transaction_stack;
2903 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002904 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302905 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002906 proc->pid, thread->pid);
2907 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002908 return_error_param = -EPROTO;
2909 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002910 goto err_empty_call_stack;
2911 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002912 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002913 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302914 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002915 proc->pid, thread->pid, in_reply_to->debug_id,
2916 in_reply_to->to_proc ?
2917 in_reply_to->to_proc->pid : 0,
2918 in_reply_to->to_thread ?
2919 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002920 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002921 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002922 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002923 return_error_param = -EPROTO;
2924 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002925 in_reply_to = NULL;
2926 goto err_bad_call_stack;
2927 }
2928 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002929 binder_inner_proc_unlock(proc);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002930 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002931 if (target_thread == NULL) {
2932 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002933 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002934 goto err_dead_binder;
2935 }
2936 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302937 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002938 proc->pid, thread->pid,
2939 target_thread->transaction_stack ?
2940 target_thread->transaction_stack->debug_id : 0,
2941 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002942 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002943 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002944 return_error_param = -EPROTO;
2945 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002946 in_reply_to = NULL;
2947 target_thread = NULL;
2948 goto err_dead_binder;
2949 }
2950 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002951 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002952 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002953 } else {
2954 if (tr->target.handle) {
2955 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002956
Todd Kjosc37162d2017-05-26 11:56:29 -07002957 /*
2958 * There must already be a strong ref
2959 * on this node. Take a strong
2960 * increment on the node to ensure it
2961 * stays alive until the transaction is
2962 * done.
2963 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002964 binder_proc_lock(proc);
2965 ref = binder_get_ref_olocked(proc, tr->target.handle,
2966 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002967 if (ref) {
Todd Kjos291d9682017-09-25 08:55:09 -07002968 target_node = binder_get_node_refs_for_txn(
2969 ref->node, &target_proc,
2970 &return_error);
2971 } else {
2972 binder_user_error("%d:%d got transaction to invalid handle\n",
2973 proc->pid, thread->pid);
2974 return_error = BR_FAILED_REPLY;
Todd Kjosc37162d2017-05-26 11:56:29 -07002975 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002976 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002977 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002978 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002979 target_node = context->binder_context_mgr_node;
Todd Kjos291d9682017-09-25 08:55:09 -07002980 if (target_node)
2981 target_node = binder_get_node_refs_for_txn(
2982 target_node, &target_proc,
2983 &return_error);
2984 else
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002985 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002986 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002987 }
Todd Kjos291d9682017-09-25 08:55:09 -07002988 if (!target_node) {
2989 /*
2990 * return_error is set above
2991 */
2992 return_error_param = -EINVAL;
Todd Kjose598d172017-03-22 17:19:52 -07002993 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002994 goto err_dead_binder;
2995 }
Todd Kjos291d9682017-09-25 08:55:09 -07002996 e->to_node = target_node->debug_id;
Stephen Smalley79af7302015-01-21 10:54:10 -05002997 if (security_binder_transaction(proc->tsk,
2998 target_proc->tsk) < 0) {
2999 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003000 return_error_param = -EPERM;
3001 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05003002 goto err_invalid_target_handle;
3003 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003004 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003005 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3006 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003007
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003008 tmp = thread->transaction_stack;
3009 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003010 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05303011 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003012 proc->pid, thread->pid, tmp->debug_id,
3013 tmp->to_proc ? tmp->to_proc->pid : 0,
3014 tmp->to_thread ?
3015 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07003016 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003017 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003018 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003019 return_error_param = -EPROTO;
3020 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003021 goto err_bad_call_stack;
3022 }
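		/*
		 * Prefer a thread in the target process that is already
		 * part of this transaction stack: that thread is blocked
		 * waiting on the chain anyway, so queueing there keeps
		 * the nested call on a single thread.
		 */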
3023 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003024 struct binder_thread *from;
3025
3026 spin_lock(&tmp->lock);
3027 from = tmp->from;
3028 if (from && from->proc == target_proc) {
3029 atomic_inc(&from->tmp_ref);
3030 target_thread = from;
3031 spin_unlock(&tmp->lock);
3032 break;
3033 }
3034 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003035 tmp = tmp->from_parent;
3036 }
3037 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07003038 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003039 }
Martijn Coenen053be422017-06-06 15:17:46 -07003040 if (target_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003041 e->to_thread = target_thread->pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003042 e->to_proc = target_proc->pid;
3043
3044 /* TODO: reuse incoming transaction for reply */
3045 t = kzalloc(sizeof(*t), GFP_KERNEL);
3046 if (t == NULL) {
3047 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003048 return_error_param = -ENOMEM;
3049 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003050 goto err_alloc_t_failed;
3051 }
3052 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07003053 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003054
3055 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3056 if (tcomplete == NULL) {
3057 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003058 return_error_param = -ENOMEM;
3059 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003060 goto err_alloc_tcomplete_failed;
3061 }
3062 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3063
Todd Kjos1cfe6272017-05-24 13:33:28 -07003064 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003065
3066 if (reply)
3067 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003068 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003069 proc->pid, thread->pid, t->debug_id,
3070 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003071 (u64)tr->data.ptr.buffer,
3072 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003073 (u64)tr->data_size, (u64)tr->offsets_size,
3074 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003075 else
3076 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02003077 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003078 proc->pid, thread->pid, t->debug_id,
3079 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003080 (u64)tr->data.ptr.buffer,
3081 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02003082 (u64)tr->data_size, (u64)tr->offsets_size,
3083 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003084
3085 if (!reply && !(tr->flags & TF_ONE_WAY))
3086 t->from = thread;
3087 else
3088 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03003089 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003090 t->to_proc = target_proc;
3091 t->to_thread = target_thread;
3092 t->code = tr->code;
3093 t->flags = tr->flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07003094 if (!(t->flags & TF_ONE_WAY) &&
3095 binder_supported_policy(current->policy)) {
3096 /* Inherit supported policies for synchronous transactions */
3097 t->priority.sched_policy = current->policy;
3098 t->priority.prio = current->normal_prio;
3099 } else {
3100 /* Otherwise, fall back to the default priority */
3101 t->priority = target_proc->default_priority;
3102 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003103
3104 trace_binder_transaction(reply, t, target_node);
3105
Todd Kjosd325d372016-10-10 10:40:53 -07003106 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02003107 tr->offsets_size, extra_buffers_size,
3108 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07003109 if (IS_ERR(t->buffer)) {
3110 /*
3111 * -ESRCH indicates VMA cleared. The target is dying.
3112 */
3113 return_error_param = PTR_ERR(t->buffer);
3114 return_error = return_error_param == -ESRCH ?
3115 BR_DEAD_REPLY : BR_FAILED_REPLY;
3116 return_error_line = __LINE__;
3117 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003118 goto err_binder_alloc_buf_failed;
3119 }
3120 t->buffer->allow_user_free = 0;
3121 t->buffer->debug_id = t->debug_id;
3122 t->buffer->transaction = t;
3123 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003124 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003125 off_start = (binder_size_t *)(t->buffer->data +
3126 ALIGN(tr->data_size, sizeof(void *)));
3127 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003128
Arve Hjønnevågda498892014-02-21 14:40:26 -08003129 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3130 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303131 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3132 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003133 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003134 return_error_param = -EFAULT;
3135 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003136 goto err_copy_data_failed;
3137 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003138 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3139 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303140 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3141 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003142 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003143 return_error_param = -EFAULT;
3144 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003145 goto err_copy_data_failed;
3146 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003147 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3148 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3149 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003150 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003151 return_error_param = -EINVAL;
3152 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003153 goto err_bad_offset;
3154 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02003155 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3156 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3157 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05303158 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003159 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003160 return_error_param = -EINVAL;
3161 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003162 goto err_bad_offset;
3163 }
3164 off_end = (void *)off_start + tr->offsets_size;
3165 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3166 sg_buf_end = sg_bufp + extra_buffers_size;
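	/*
	 * Kernel buffer layout at this point: [ data payload | aligned
	 * offsets array | scatter-gather space of extra_buffers_size
	 * bytes ]; sg_bufp advances through the last region as
	 * BINDER_TYPE_PTR objects are copied in.
	 */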
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003167 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003168 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02003169 struct binder_object_header *hdr;
3170 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09003171
Martijn Coenen00c80372016-07-13 12:06:49 +02003172 if (object_size == 0 || *offp < off_min) {
3173 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003174 proc->pid, thread->pid, (u64)*offp,
3175 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02003176 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003177 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003178 return_error_param = -EINVAL;
3179 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003180 goto err_bad_offset;
3181 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003182
3183 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3184 off_min = *offp + object_size;
3185 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003186 case BINDER_TYPE_BINDER:
3187 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003188 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003189
Martijn Coenen00c80372016-07-13 12:06:49 +02003190 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003191 ret = binder_translate_binder(fp, t, thread);
3192 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02003193 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003194 return_error_param = ret;
3195 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003196 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003197 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003198 } break;
3199 case BINDER_TYPE_HANDLE:
3200 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003201 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003202
Martijn Coenen00c80372016-07-13 12:06:49 +02003203 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003204 ret = binder_translate_handle(fp, t, thread);
3205 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003206 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003207 return_error_param = ret;
3208 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003209 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003210 }
3211 } break;
3212
3213 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003214 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003215 int target_fd = binder_translate_fd(fp->fd, t, thread,
3216 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003217
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003218 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003219 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003220 return_error_param = target_fd;
3221 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003222 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003223 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003224 fp->pad_binder = 0;
3225 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003226 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003227 case BINDER_TYPE_FDA: {
3228 struct binder_fd_array_object *fda =
3229 to_binder_fd_array_object(hdr);
3230 struct binder_buffer_object *parent =
3231 binder_validate_ptr(t->buffer, fda->parent,
3232 off_start,
3233 offp - off_start);
3234 if (!parent) {
3235 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3236 proc->pid, thread->pid);
3237 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003238 return_error_param = -EINVAL;
3239 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003240 goto err_bad_parent;
3241 }
3242 if (!binder_validate_fixup(t->buffer, off_start,
3243 parent, fda->parent_offset,
3244 last_fixup_obj,
3245 last_fixup_min_off)) {
3246 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3247 proc->pid, thread->pid);
3248 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003249 return_error_param = -EINVAL;
3250 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003251 goto err_bad_parent;
3252 }
3253 ret = binder_translate_fd_array(fda, parent, t, thread,
3254 in_reply_to);
3255 if (ret < 0) {
3256 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003257 return_error_param = ret;
3258 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003259 goto err_translate_failed;
3260 }
3261 last_fixup_obj = parent;
3262 last_fixup_min_off =
3263 fda->parent_offset + sizeof(u32) * fda->num_fds;
3264 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003265 case BINDER_TYPE_PTR: {
3266 struct binder_buffer_object *bp =
3267 to_binder_buffer_object(hdr);
3268 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003269
Martijn Coenen5a6da532016-09-30 14:10:07 +02003270 if (bp->length > buf_left) {
3271 binder_user_error("%d:%d got transaction with too large buffer\n",
3272 proc->pid, thread->pid);
3273 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003274 return_error_param = -EINVAL;
3275 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003276 goto err_bad_offset;
3277 }
3278 if (copy_from_user(sg_bufp,
3279 (const void __user *)(uintptr_t)
3280 bp->buffer, bp->length)) {
3281 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3282 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07003283 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003284 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003285 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003286 goto err_copy_data_failed;
3287 }
3288 /* Fixup buffer pointer to target proc address space */
3289 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07003290 binder_alloc_get_user_buffer_offset(
3291 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003292 sg_bufp += ALIGN(bp->length, sizeof(u64));
3293
3294 ret = binder_fixup_parent(t, thread, bp, off_start,
3295 offp - off_start,
3296 last_fixup_obj,
3297 last_fixup_min_off);
3298 if (ret < 0) {
3299 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003300 return_error_param = ret;
3301 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003302 goto err_translate_failed;
3303 }
3304 last_fixup_obj = bp;
3305 last_fixup_min_off = 0;
3306 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003307 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01003308 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02003309 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003310 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003311 return_error_param = -EINVAL;
3312 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003313 goto err_bad_object_type;
3314 }
3315 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003316 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003317 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003318
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003319 if (reply) {
Martijn Coenen1af61802017-10-19 15:04:46 +02003320 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003321 binder_inner_proc_lock(target_proc);
3322 if (target_thread->is_dead) {
3323 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003324 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003325 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003326 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003327 binder_pop_transaction_ilocked(target_thread, in_reply_to);
Martijn Coenen1af61802017-10-19 15:04:46 +02003328 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003329 binder_inner_proc_unlock(target_proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003330 wake_up_interruptible_sync(&target_thread->wait);
Martijn Coenenecd972d2017-05-26 10:48:56 -07003331 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003332 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003333 } else if (!(t->flags & TF_ONE_WAY)) {
3334 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003335 binder_inner_proc_lock(proc);
Martijn Coenendac2e9c2017-11-13 09:55:21 +01003336 /*
3337 * Defer the TRANSACTION_COMPLETE, so we don't return to
3338 * userspace immediately; this allows the target process to
3339 * immediately start processing this transaction, reducing
3340 * latency. We will then return the TRANSACTION_COMPLETE when
3341 * the target replies (or there is an error).
3342 */
3343 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003344 t->need_reply = 1;
3345 t->from_parent = thread->transaction_stack;
3346 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003347 binder_inner_proc_unlock(proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003348 if (!binder_proc_transaction(t, target_proc, target_thread)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003349 binder_inner_proc_lock(proc);
3350 binder_pop_transaction_ilocked(thread, t);
3351 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003352 goto err_dead_proc_or_thread;
3353 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003354 } else {
3355 BUG_ON(target_node == NULL);
3356 BUG_ON(t->buffer->async_transaction != 1);
Martijn Coenen1af61802017-10-19 15:04:46 +02003357 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen053be422017-06-06 15:17:46 -07003358 if (!binder_proc_transaction(t, target_proc, NULL))
Todd Kjos2f993e22017-05-12 14:42:55 -07003359 goto err_dead_proc_or_thread;
Riley Andrewsb5968812015-09-01 12:42:07 -07003360 }
Todd Kjos2f993e22017-05-12 14:42:55 -07003361 if (target_thread)
3362 binder_thread_dec_tmpref(target_thread);
3363 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003364 if (target_node)
3365 binder_dec_node_tmpref(target_node);
Todd Kjos1cfe6272017-05-24 13:33:28 -07003366 /*
3367 * write barrier to synchronize with initialization
3368 * of log entry
3369 */
3370 smp_wmb();
3371 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003372 return;
3373
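/*
 * Error unwinding: each label below releases exactly the state set up
 * before the corresponding failure point, in reverse order.
 */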
Todd Kjos2f993e22017-05-12 14:42:55 -07003374err_dead_proc_or_thread:
3375 return_error = BR_DEAD_REPLY;
3376 return_error_line = __LINE__;
Xu YiPing86578a02017-05-22 11:26:23 -07003377 binder_dequeue_work(proc, tcomplete);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003378err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003379err_bad_object_type:
3380err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003381err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003382err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003383 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003384 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjos291d9682017-09-25 08:55:09 -07003385 if (target_node)
3386 binder_dec_node_tmpref(target_node);
Todd Kjosc37162d2017-05-26 11:56:29 -07003387 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003388 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07003389 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003390err_binder_alloc_buf_failed:
3391 kfree(tcomplete);
3392 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3393err_alloc_tcomplete_failed:
3394 kfree(t);
3395 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3396err_alloc_t_failed:
3397err_bad_call_stack:
3398err_empty_call_stack:
3399err_dead_binder:
3400err_invalid_target_handle:
Todd Kjos2f993e22017-05-12 14:42:55 -07003401 if (target_thread)
3402 binder_thread_dec_tmpref(target_thread);
3403 if (target_proc)
3404 binder_proc_dec_tmpref(target_proc);
Todd Kjos291d9682017-09-25 08:55:09 -07003405 if (target_node) {
Todd Kjosc37162d2017-05-26 11:56:29 -07003406 binder_dec_node(target_node, 1, 0);
Todd Kjos291d9682017-09-25 08:55:09 -07003407 binder_dec_node_tmpref(target_node);
3408 }
Todd Kjosc37162d2017-05-26 11:56:29 -07003409
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003410 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07003411 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3412 proc->pid, thread->pid, return_error, return_error_param,
3413 (u64)tr->data_size, (u64)tr->offsets_size,
3414 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003415
3416 {
3417 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09003418
Todd Kjose598d172017-03-22 17:19:52 -07003419 e->return_error = return_error;
3420 e->return_error_param = return_error_param;
3421 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003422 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3423 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07003424 /*
3425 * write barrier to synchronize with initialization
3426 * of log entry
3427 */
3428 smp_wmb();
3429 WRITE_ONCE(e->debug_id_done, t_debug_id);
3430 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003431 }
3432
Todd Kjos858b8da2017-04-21 17:35:12 -07003433 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003434 if (in_reply_to) {
Martijn Coenenecd972d2017-05-26 10:48:56 -07003435 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos858b8da2017-04-21 17:35:12 -07003436 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Martijn Coenen1af61802017-10-19 15:04:46 +02003437 binder_enqueue_thread_work(thread, &thread->return_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003438 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07003439 } else {
3440 thread->return_error.cmd = return_error;
Martijn Coenen1af61802017-10-19 15:04:46 +02003441 binder_enqueue_thread_work(thread, &thread->return_error.work);
Todd Kjos858b8da2017-04-21 17:35:12 -07003442 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003443}
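
/*
 * Illustrative user-space caller for the write path handled below (a
 * sketch, not driver code; the helper name is hypothetical, error
 * handling is elided, and <sys/ioctl.h> plus the UAPI binder header are
 * assumed). The packed cmd + binder_transaction_data pair is exactly
 * what binder_thread_write() parses for BC_TRANSACTION.
 */
static int send_binder_transaction(int binder_fd, uint32_t handle,
				   uint32_t code, const void *data,
				   size_t size)
{
	struct {
		uint32_t cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) wb = {
		.cmd = BC_TRANSACTION,
		.tr = {
			.target.handle = handle,
			.code = code,
			.flags = 0,
			.data_size = size,
			.offsets_size = 0,
			.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data,
			.data.ptr.offsets = 0,
		},
	};
	struct binder_write_read bwr = {
		.write_size = sizeof(wb),
		.write_buffer = (binder_uintptr_t)(uintptr_t)&wb,
	};

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}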
3444
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003445static int binder_thread_write(struct binder_proc *proc,
3446 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003447 binder_uintptr_t binder_buffer, size_t size,
3448 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003449{
3450 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003451 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003452 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003453 void __user *ptr = buffer + *consumed;
3454 void __user *end = buffer + size;
3455
Todd Kjos858b8da2017-04-21 17:35:12 -07003456 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003457 int ret;
3458
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003459 if (get_user(cmd, (uint32_t __user *)ptr))
3460 return -EFAULT;
3461 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003462 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003463 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003464 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3465 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3466 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003467 }
3468 switch (cmd) {
3469 case BC_INCREFS:
3470 case BC_ACQUIRE:
3471 case BC_RELEASE:
3472 case BC_DECREFS: {
3473 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003474 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003475 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3476 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3477 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003478
3479 if (get_user(target, (uint32_t __user *)ptr))
3480 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003481
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003482 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003483 ret = -1;
3484 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003485 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003486 mutex_lock(&context->context_mgr_node_lock);
3487 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003488 if (ctx_mgr_node)
3489 ret = binder_inc_ref_for_node(
3490 proc, ctx_mgr_node,
3491 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003492 mutex_unlock(&context->context_mgr_node_lock);
3493 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003494 if (ret)
3495 ret = binder_update_ref_for_handle(
3496 proc, target, increment, strong,
3497 &rdata);
3498 if (!ret && rdata.desc != target) {
3499 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3500 proc->pid, thread->pid,
3501 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003502 }
3503 switch (cmd) {
3504 case BC_INCREFS:
3505 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003506 break;
3507 case BC_ACQUIRE:
3508 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003509 break;
3510 case BC_RELEASE:
3511 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003512 break;
3513 case BC_DECREFS:
3514 default:
3515 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003516 break;
3517 }
3518 if (ret) {
3519 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3520 proc->pid, thread->pid, debug_string,
3521 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003522 break;
3523 }
3524 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003525 "%d:%d %s ref %d desc %d s %d w %d\n",
3526 proc->pid, thread->pid, debug_string,
3527 rdata.debug_id, rdata.desc, rdata.strong,
3528 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003529 break;
3530 }
3531 case BC_INCREFS_DONE:
3532 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003533 binder_uintptr_t node_ptr;
3534 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003535 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003536 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003537
Arve Hjønnevågda498892014-02-21 14:40:26 -08003538 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003539 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003540 ptr += sizeof(binder_uintptr_t);
3541 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003542 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003543 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003544 node = binder_get_node(proc, node_ptr);
3545 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003546 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003547 proc->pid, thread->pid,
3548 cmd == BC_INCREFS_DONE ?
3549 "BC_INCREFS_DONE" :
3550 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003551 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003552 break;
3553 }
3554 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003555 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003556 proc->pid, thread->pid,
3557 cmd == BC_INCREFS_DONE ?
3558 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003559 (u64)node_ptr, node->debug_id,
3560 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003561 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003562 break;
3563 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003564 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003565 if (cmd == BC_ACQUIRE_DONE) {
3566 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303567 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003568 proc->pid, thread->pid,
3569 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003570 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003571 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003572 break;
3573 }
3574 node->pending_strong_ref = 0;
3575 } else {
3576 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303577 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003578 proc->pid, thread->pid,
3579 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003580 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003581 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003582 break;
3583 }
3584 node->pending_weak_ref = 0;
3585 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003586 free_node = binder_dec_node_nilocked(node,
3587 cmd == BC_ACQUIRE_DONE, 0);
3588 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003589 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003590 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003591 proc->pid, thread->pid,
3592 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003593 node->debug_id, node->local_strong_refs,
3594 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003595 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003596 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003597 break;
3598 }
3599 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303600 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003601 return -EINVAL;
3602 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303603 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003604 return -EINVAL;
3605
3606 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003607 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003608 struct binder_buffer *buffer;
3609
Arve Hjønnevågda498892014-02-21 14:40:26 -08003610 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003611 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003612 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003613
Todd Kjos076072a2017-04-21 14:32:11 -07003614 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3615 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003616 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003617 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3618 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003619 break;
3620 }
3621 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003622 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3623 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003624 break;
3625 }
3626 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003627 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3628 proc->pid, thread->pid, (u64)data_ptr,
3629 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003630 buffer->transaction ? "active" : "finished");
3631
3632 if (buffer->transaction) {
3633 buffer->transaction->buffer = NULL;
3634 buffer->transaction = NULL;
3635 }
3636 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003637 struct binder_node *buf_node;
3638 struct binder_work *w;
3639
3640 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003641 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003642 BUG_ON(!buf_node->has_async_transaction);
3643 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003644 w = binder_dequeue_work_head_ilocked(
3645 &buf_node->async_todo);
Martijn Coenen4501c042017-08-10 13:56:16 +02003646 if (!w) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003647 buf_node->has_async_transaction = 0;
Martijn Coenen4501c042017-08-10 13:56:16 +02003648 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003649 binder_enqueue_work_ilocked(
Martijn Coenen4501c042017-08-10 13:56:16 +02003650 w, &proc->todo);
3651 binder_wakeup_proc_ilocked(proc);
3652 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003653 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003654 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003655 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003656 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003657 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003658 break;
3659 }
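
		/*
		 * Illustrative user-space side of BC_FREE_BUFFER (a sketch,
		 * not driver code): after consuming a BR_TRANSACTION or
		 * BR_REPLY, the process hands tr.data.ptr.buffer back:
		 *
		 *	struct {
		 *		uint32_t cmd;
		 *		binder_uintptr_t ptr;
		 *	} __attribute__((packed)) wb = {
		 *		.cmd = BC_FREE_BUFFER,
		 *		.ptr = tr.data.ptr.buffer,
		 *	};
		 *	struct binder_write_read bwr = {
		 *		.write_size = sizeof(wb),
		 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&wb,
		 *	};
		 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
		 */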
3660
Martijn Coenen5a6da532016-09-30 14:10:07 +02003661 case BC_TRANSACTION_SG:
3662 case BC_REPLY_SG: {
3663 struct binder_transaction_data_sg tr;
3664
3665 if (copy_from_user(&tr, ptr, sizeof(tr)))
3666 return -EFAULT;
3667 ptr += sizeof(tr);
3668 binder_transaction(proc, thread, &tr.transaction_data,
3669 cmd == BC_REPLY_SG, tr.buffers_size);
3670 break;
3671 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003672 case BC_TRANSACTION:
3673 case BC_REPLY: {
3674 struct binder_transaction_data tr;
3675
3676 if (copy_from_user(&tr, ptr, sizeof(tr)))
3677 return -EFAULT;
3678 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003679 binder_transaction(proc, thread, &tr,
3680 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003681 break;
3682 }
3683
3684 case BC_REGISTER_LOOPER:
3685 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303686 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003687 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003688 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003689 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3690 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303691 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003692 proc->pid, thread->pid);
3693 } else if (proc->requested_threads == 0) {
3694 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303695 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003696 proc->pid, thread->pid);
3697 } else {
3698 proc->requested_threads--;
3699 proc->requested_threads_started++;
3700 }
3701 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003702 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003703 break;
3704 case BC_ENTER_LOOPER:
3705 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303706 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003707 proc->pid, thread->pid);
3708 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3709 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303710 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003711 proc->pid, thread->pid);
3712 }
3713 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3714 break;
3715 case BC_EXIT_LOOPER:
3716 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303717 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003718 proc->pid, thread->pid);
3719 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3720 break;
3721
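		/*
		 * Illustrative looper protocol from user space (a sketch):
		 * the main thread writes BC_ENTER_LOOPER once; threads the
		 * process spawns in response to BR_SPAWN_LOOPER write
		 * BC_REGISTER_LOOPER instead; every looper then blocks in
		 * BINDER_WRITE_READ with a non-zero read_size and dispatches
		 * the BR_* commands found in the read buffer.
		 */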
3722 case BC_REQUEST_DEATH_NOTIFICATION:
3723 case BC_CLEAR_DEATH_NOTIFICATION: {
3724 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003725 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003726 struct binder_ref *ref;
Todd Kjos5346bf32016-10-20 16:43:34 -07003727 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003728
3729 if (get_user(target, (uint32_t __user *)ptr))
3730 return -EFAULT;
3731 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003732 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003733 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003734 ptr += sizeof(binder_uintptr_t);
Todd Kjos5346bf32016-10-20 16:43:34 -07003735 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3736 /*
3737 * Allocate memory for death notification
3738 * before taking lock
3739 */
3740 death = kzalloc(sizeof(*death), GFP_KERNEL);
3741 if (death == NULL) {
3742 WARN_ON(thread->return_error.cmd !=
3743 BR_OK);
3744 thread->return_error.cmd = BR_ERROR;
Martijn Coenen1af61802017-10-19 15:04:46 +02003745 binder_enqueue_thread_work(
3746 thread,
3747 &thread->return_error.work);
Todd Kjos5346bf32016-10-20 16:43:34 -07003748 binder_debug(
3749 BINDER_DEBUG_FAILED_TRANSACTION,
3750 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3751 proc->pid, thread->pid);
3752 break;
3753 }
3754 }
3755 binder_proc_lock(proc);
3756 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003757 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303758 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003759 proc->pid, thread->pid,
3760 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3761 "BC_REQUEST_DEATH_NOTIFICATION" :
3762 "BC_CLEAR_DEATH_NOTIFICATION",
3763 target);
Todd Kjos5346bf32016-10-20 16:43:34 -07003764 binder_proc_unlock(proc);
3765 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003766 break;
3767 }
3768
3769 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003770 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003771 proc->pid, thread->pid,
3772 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3773 "BC_REQUEST_DEATH_NOTIFICATION" :
3774 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003775 (u64)cookie, ref->data.debug_id,
3776 ref->data.desc, ref->data.strong,
3777 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003778
Martijn Coenenf9eac642017-05-22 11:26:23 -07003779 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003780 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3781 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303782 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003783 proc->pid, thread->pid);
Martijn Coenenf9eac642017-05-22 11:26:23 -07003784 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003785 binder_proc_unlock(proc);
3786 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003787 break;
3788 }
3789 binder_stats_created(BINDER_STAT_DEATH);
3790 INIT_LIST_HEAD(&death->work.entry);
3791 death->cookie = cookie;
3792 ref->death = death;
3793 if (ref->node->proc == NULL) {
3794 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Martijn Coenen3bdbe4c2017-08-10 13:50:52 +02003795
3796 binder_inner_proc_lock(proc);
3797 binder_enqueue_work_ilocked(
3798 &ref->death->work, &proc->todo);
3799 binder_wakeup_proc_ilocked(proc);
3800 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003801 }
3802 } else {
3803 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303804 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003805 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003806 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003807 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003808 break;
3809 }
3810 death = ref->death;
3811 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003812 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003813 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003814 (u64)death->cookie,
3815 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003816 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003817 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003818 break;
3819 }
3820 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003821 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003822 if (list_empty(&death->work.entry)) {
3823 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003824 if (thread->looper &
3825 (BINDER_LOOPER_STATE_REGISTERED |
3826 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003827 binder_enqueue_thread_work_ilocked(
3828 thread,
3829 &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003830 else {
3831 binder_enqueue_work_ilocked(
3832 &death->work,
3833 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003834 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003835 proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003836 }
3837 } else {
3838 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3839 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3840 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003841 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003842 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07003843 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003844 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003845 } break;
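		/*
		 * Illustrative userspace sketch: a death-notification
		 * request is the command followed by the packed handle and
		 * cookie pair (struct binder_handle_cookie in the UAPI
		 * header), which is exactly what the get_user() calls above
		 * consume.  binder_fd, handle and proxy are placeholders.
		 *
		 *	struct __attribute__((packed)) {
		 *		uint32_t cmd;
		 *		uint32_t handle;
		 *		binder_uintptr_t cookie;
		 *	} req = { BC_REQUEST_DEATH_NOTIFICATION, handle,
		 *		  (binder_uintptr_t)proxy };
		 *	struct binder_write_read bwr = {
		 *		.write_buffer = (binder_uintptr_t)&req,
		 *		.write_size = sizeof(req),
		 *	};
		 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
		 */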
3846 case BC_DEAD_BINDER_DONE: {
3847 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003848 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003849 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003850
Arve Hjønnevågda498892014-02-21 14:40:26 -08003851 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003852 return -EFAULT;
3853
Lisa Du7a64cd82016-02-17 09:32:52 +08003854 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003855 binder_inner_proc_lock(proc);
3856 list_for_each_entry(w, &proc->delivered_death,
3857 entry) {
3858 struct binder_ref_death *tmp_death =
3859 container_of(w,
3860 struct binder_ref_death,
3861 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003862
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003863 if (tmp_death->cookie == cookie) {
3864 death = tmp_death;
3865 break;
3866 }
3867 }
3868 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003869 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3870 proc->pid, thread->pid, (u64)cookie,
3871 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003872 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003873 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3874 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003875 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003876 break;
3877 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003878 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003879 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3880 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003881 if (thread->looper &
3882 (BINDER_LOOPER_STATE_REGISTERED |
3883 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen1af61802017-10-19 15:04:46 +02003884 binder_enqueue_thread_work_ilocked(
3885 thread, &death->work);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003886 else {
3887 binder_enqueue_work_ilocked(
3888 &death->work,
3889 &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07003890 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003891 }
3892 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003893 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003894 } break;
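		/*
		 * Illustrative userspace sketch: after a BR_DEAD_BINDER has
		 * been consumed and its callback has run, userspace must
		 * acknowledge with the same cookie so the work item can be
		 * removed from delivered_death above.
		 *
		 *	struct __attribute__((packed)) {
		 *		uint32_t cmd;
		 *		binder_uintptr_t cookie;
		 *	} done = { BC_DEAD_BINDER_DONE, cookie };
		 *	(written via BINDER_WRITE_READ as in the sketches above)
		 */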
3895
3896 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303897 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003898 proc->pid, thread->pid, cmd);
3899 return -EINVAL;
3900 }
3901 *consumed = ptr - buffer;
3902 }
3903 return 0;
3904}
3905
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003906static void binder_stat_br(struct binder_proc *proc,
3907 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003908{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003909 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003910 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003911 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3912 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3913 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003914 }
3915}
3916
Todd Kjos60792612017-05-24 10:51:01 -07003917static int binder_put_node_cmd(struct binder_proc *proc,
3918 struct binder_thread *thread,
3919 void __user **ptrp,
3920 binder_uintptr_t node_ptr,
3921 binder_uintptr_t node_cookie,
3922 int node_debug_id,
3923 uint32_t cmd, const char *cmd_name)
3924{
3925 void __user *ptr = *ptrp;
3926
3927 if (put_user(cmd, (uint32_t __user *)ptr))
3928 return -EFAULT;
3929 ptr += sizeof(uint32_t);
3930
3931 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3932 return -EFAULT;
3933 ptr += sizeof(binder_uintptr_t);
3934
3935 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3936 return -EFAULT;
3937 ptr += sizeof(binder_uintptr_t);
3938
3939 binder_stat_br(proc, thread, cmd);
3940 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3941 proc->pid, thread->pid, cmd_name, node_debug_id,
3942 (u64)node_ptr, (u64)node_cookie);
3943
3944 *ptrp = ptr;
3945 return 0;
3946}
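/*
 * Each command emitted by binder_put_node_cmd() occupies
 * sizeof(uint32_t) + 2 * sizeof(binder_uintptr_t) bytes of the read
 * buffer.  A userspace reader consumes it like this (illustrative
 * sketch; the cursor variable p is a placeholder):
 *
 *	uint32_t cmd = *(uint32_t *)p;
 *	p += sizeof(uint32_t);
 *	binder_uintptr_t node_ptr = *(binder_uintptr_t *)p;
 *	p += sizeof(binder_uintptr_t);
 *	binder_uintptr_t cookie = *(binder_uintptr_t *)p;
 *	p += sizeof(binder_uintptr_t);
 *	// cmd is one of BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
 */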
3947
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003948static int binder_wait_for_work(struct binder_thread *thread,
3949 bool do_proc_work)
3950{
3951 DEFINE_WAIT(wait);
3952 struct binder_proc *proc = thread->proc;
3953 int ret = 0;
3954
3955 freezer_do_not_count();
3956 binder_inner_proc_lock(proc);
3957 for (;;) {
3958 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3959 if (binder_has_work_ilocked(thread, do_proc_work))
3960 break;
3961 if (do_proc_work)
3962 list_add(&thread->waiting_thread_node,
3963 &proc->waiting_threads);
3964 binder_inner_proc_unlock(proc);
3965 schedule();
3966 binder_inner_proc_lock(proc);
3967 list_del_init(&thread->waiting_thread_node);
3968 if (signal_pending(current)) {
3969 ret = -ERESTARTSYS;
3970 break;
3971 }
3972 }
3973 finish_wait(&thread->wait, &wait);
3974 binder_inner_proc_unlock(proc);
3975 freezer_count();
3976
3977 return ret;
3978}
3979
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003980static int binder_thread_read(struct binder_proc *proc,
3981 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003982 binder_uintptr_t binder_buffer, size_t size,
3983 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003984{
Arve Hjønnevågda498892014-02-21 14:40:26 -08003985 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003986 void __user *ptr = buffer + *consumed;
3987 void __user *end = buffer + size;
3988
3989 int ret = 0;
3990 int wait_for_proc_work;
3991
3992 if (*consumed == 0) {
3993 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3994 return -EFAULT;
3995 ptr += sizeof(uint32_t);
3996 }
3997
3998retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07003999 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004000 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07004001 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004002
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004003 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004004
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004005 trace_binder_wait_for_work(wait_for_proc_work,
4006 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004007 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004008 if (wait_for_proc_work) {
4009 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4010 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304011 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004012 proc->pid, thread->pid, thread->looper);
4013 wait_event_interruptible(binder_user_error_wait,
4014 binder_stop_on_user_error < 2);
4015 }
Martijn Coenenecd972d2017-05-26 10:48:56 -07004016 binder_restore_priority(current, proc->default_priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004017 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004018
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004019 if (non_block) {
4020 if (!binder_has_work(thread, wait_for_proc_work))
4021 ret = -EAGAIN;
4022 } else {
4023 ret = binder_wait_for_work(thread, wait_for_proc_work);
4024 }
4025
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004026 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4027
4028 if (ret)
4029 return ret;
4030
4031 while (1) {
4032 uint32_t cmd;
4033 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004034 struct binder_work *w = NULL;
4035 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004036 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07004037 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004038
Todd Kjose7f23ed2017-03-21 13:06:01 -07004039 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004040 if (!binder_worklist_empty_ilocked(&thread->todo))
4041 list = &thread->todo;
4042 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4043 wait_for_proc_work)
4044 list = &proc->todo;
4045 else {
4046 binder_inner_proc_unlock(proc);
4047
Dmitry Voytik395262a2014-09-08 18:16:34 +04004048 /* no data added */
Todd Kjos6798e6d2017-01-06 14:19:25 -08004049 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004050 goto retry;
4051 break;
4052 }
4053
Todd Kjose7f23ed2017-03-21 13:06:01 -07004054 if (end - ptr < sizeof(tr) + 4) {
4055 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004056 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004057 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004058 w = binder_dequeue_work_head_ilocked(list);
Martijn Coenen1af61802017-10-19 15:04:46 +02004059 if (binder_worklist_empty_ilocked(&thread->todo))
4060 thread->process_todo = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004061
4062 switch (w->type) {
4063 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004064 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004065 t = container_of(w, struct binder_transaction, work);
4066 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004067 case BINDER_WORK_RETURN_ERROR: {
4068 struct binder_error *e = container_of(
4069 w, struct binder_error, work);
4070
4071 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004072 binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;	/* keep a copy: e->cmd is reset below */
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07004079 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004080 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004081 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004082 cmd = BR_TRANSACTION_COMPLETE;
4083 if (put_user(cmd, (uint32_t __user *)ptr))
4084 return -EFAULT;
4085 ptr += sizeof(uint32_t);
4086
4087 binder_stat_br(proc, thread, cmd);
4088 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304089 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004090 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004091 kfree(w);
4092 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4093 } break;
4094 case BINDER_WORK_NODE: {
4095 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07004096 int strong, weak;
4097 binder_uintptr_t node_ptr = node->ptr;
4098 binder_uintptr_t node_cookie = node->cookie;
4099 int node_debug_id = node->debug_id;
4100 int has_weak_ref;
4101 int has_strong_ref;
4102 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09004103
Todd Kjos60792612017-05-24 10:51:01 -07004104 BUG_ON(proc != node->proc);
4105 strong = node->internal_strong_refs ||
4106 node->local_strong_refs;
4107 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07004108 node->local_weak_refs ||
4109 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07004110 has_strong_ref = node->has_strong_ref;
4111 has_weak_ref = node->has_weak_ref;
4112
4113 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004114 node->has_weak_ref = 1;
4115 node->pending_weak_ref = 1;
4116 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004117 }
4118 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004119 node->has_strong_ref = 1;
4120 node->pending_strong_ref = 1;
4121 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07004122 }
4123 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004124 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004125 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004126 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07004127 if (!weak && !strong) {
4128 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4129 "%d:%d node %d u%016llx c%016llx deleted\n",
4130 proc->pid, thread->pid,
4131 node_debug_id,
4132 (u64)node_ptr,
4133 (u64)node_cookie);
4134 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004135 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004136 binder_node_lock(node);
4137 /*
4138 * Acquire the node lock before freeing the
4139 * node to serialize with other threads that
4140 * may have been holding the node lock while
4141 * decrementing this node (avoids race where
4142 * this thread frees while the other thread
4143 * is unlocking the node after the final
4144 * decrement)
4145 */
4146 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004147 binder_free_node(node);
4148 } else
4149 binder_inner_proc_unlock(proc);
4150
Todd Kjos60792612017-05-24 10:51:01 -07004151 if (weak && !has_weak_ref)
4152 ret = binder_put_node_cmd(
4153 proc, thread, &ptr, node_ptr,
4154 node_cookie, node_debug_id,
4155 BR_INCREFS, "BR_INCREFS");
4156 if (!ret && strong && !has_strong_ref)
4157 ret = binder_put_node_cmd(
4158 proc, thread, &ptr, node_ptr,
4159 node_cookie, node_debug_id,
4160 BR_ACQUIRE, "BR_ACQUIRE");
4161 if (!ret && !strong && has_strong_ref)
4162 ret = binder_put_node_cmd(
4163 proc, thread, &ptr, node_ptr,
4164 node_cookie, node_debug_id,
4165 BR_RELEASE, "BR_RELEASE");
4166 if (!ret && !weak && has_weak_ref)
4167 ret = binder_put_node_cmd(
4168 proc, thread, &ptr, node_ptr,
4169 node_cookie, node_debug_id,
4170 BR_DECREFS, "BR_DECREFS");
4171 if (orig_ptr == ptr)
4172 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4173 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4174 proc->pid, thread->pid,
4175 node_debug_id,
4176 (u64)node_ptr,
4177 (u64)node_cookie);
4178 if (ret)
4179 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004180 } break;
4181 case BINDER_WORK_DEAD_BINDER:
4182 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4183 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4184 struct binder_ref_death *death;
4185 uint32_t cmd;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004186 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004187
4188 death = container_of(w, struct binder_ref_death, work);
4189 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4190 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4191 else
4192 cmd = BR_DEAD_BINDER;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004193 cookie = death->cookie;
4194
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004195 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004196 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004197 proc->pid, thread->pid,
4198 cmd == BR_DEAD_BINDER ?
4199 "BR_DEAD_BINDER" :
4200 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Martijn Coenenf9eac642017-05-22 11:26:23 -07004201 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004202 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Martijn Coenenf9eac642017-05-22 11:26:23 -07004203 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004204 kfree(death);
4205 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004206 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004207 binder_enqueue_work_ilocked(
4208 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004209 binder_inner_proc_unlock(proc);
4210 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07004211 if (put_user(cmd, (uint32_t __user *)ptr))
4212 return -EFAULT;
4213 ptr += sizeof(uint32_t);
4214 if (put_user(cookie,
4215 (binder_uintptr_t __user *)ptr))
4216 return -EFAULT;
4217 ptr += sizeof(binder_uintptr_t);
4218 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004219 if (cmd == BR_DEAD_BINDER)
4220 goto done; /* DEAD_BINDER notifications can cause transactions */
4221 } break;
4222 }
4223
4224 if (!t)
4225 continue;
4226
4227 BUG_ON(t->buffer == NULL);
4228 if (t->buffer->target_node) {
4229 struct binder_node *target_node = t->buffer->target_node;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004230 struct binder_priority node_prio;
Seunghun Lee10f62862014-05-01 01:30:23 +09004231
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004232 tr.target.ptr = target_node->ptr;
4233 tr.cookie = target_node->cookie;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004234 node_prio.sched_policy = target_node->sched_policy;
4235 node_prio.prio = target_node->min_priority;
Martijn Coenenc46810c2017-06-23 10:13:43 -07004236 binder_transaction_priority(current, t, node_prio,
4237 target_node->inherit_rt);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004238 cmd = BR_TRANSACTION;
4239 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08004240 tr.target.ptr = 0;
4241 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004242 cmd = BR_REPLY;
4243 }
4244 tr.code = t->code;
4245 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06004246 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004247
Todd Kjos2f993e22017-05-12 14:42:55 -07004248 t_from = binder_get_txn_from(t);
4249 if (t_from) {
4250 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09004251
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004252 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08004253 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004254 } else {
4255 tr.sender_pid = 0;
4256 }
4257
4258 tr.data_size = t->buffer->data_size;
4259 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07004260 tr.data.ptr.buffer = (binder_uintptr_t)
4261 ((uintptr_t)t->buffer->data +
4262 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004263 tr.data.ptr.offsets = tr.data.ptr.buffer +
4264 ALIGN(t->buffer->data_size,
4265 sizeof(void *));
4266
Todd Kjos2f993e22017-05-12 14:42:55 -07004267 if (put_user(cmd, (uint32_t __user *)ptr)) {
4268 if (t_from)
4269 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004270
4271 binder_cleanup_transaction(t, "put_user failed",
4272 BR_FAILED_REPLY);
4273
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004274 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004275 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004276 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07004277 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4278 if (t_from)
4279 binder_thread_dec_tmpref(t_from);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004280
4281 binder_cleanup_transaction(t, "copy_to_user failed",
4282 BR_FAILED_REPLY);
4283
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004284 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004285 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004286 ptr += sizeof(tr);
4287
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004288 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004289 binder_stat_br(proc, thread, cmd);
4290 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004291 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004292 proc->pid, thread->pid,
4293 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4294 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07004295 t->debug_id, t_from ? t_from->proc->pid : 0,
4296 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004297 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004298 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004299
Todd Kjos2f993e22017-05-12 14:42:55 -07004300 if (t_from)
4301 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004302 t->buffer->allow_user_free = 1;
4303 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07004304 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004305 t->to_parent = thread->transaction_stack;
4306 t->to_thread = thread;
4307 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07004308 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004309 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07004310 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004311 }
4312 break;
4313 }
4314
4315done:
4316
4317 *consumed = ptr - buffer;
Todd Kjosd600e902017-05-25 17:35:02 -07004318 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004319 if (proc->requested_threads == 0 &&
4320 list_empty(&thread->proc->waiting_threads) &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004321 proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out */) {
4325 proc->requested_threads++;
Todd Kjosd600e902017-05-25 17:35:02 -07004326 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004327 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304328 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004329 proc->pid, thread->pid);
4330 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4331 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07004332 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Todd Kjosd600e902017-05-25 17:35:02 -07004333 } else
4334 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004335 return 0;
4336}
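/*
 * Illustrative userspace sketch of the read side of the protocol
 * implemented above: a looper passes only a read buffer and then walks
 * the returned commands.  The buffer size, helper name and cursor
 * handling are placeholders; each BR_* carries a command-specific
 * payload (BR_NOOP and BR_SPAWN_LOOPER carry none) and only the
 * transaction case is shown.
 *
 *	char buf[256];
 *	struct binder_write_read bwr = {
 *		.read_buffer = (binder_uintptr_t)buf,
 *		.read_size = sizeof(buf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -1;
 *	for (char *p = buf; p < buf + bwr.read_consumed; ) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		if (cmd == BR_TRANSACTION || cmd == BR_REPLY) {
 *			struct binder_transaction_data *tr = (void *)p;
 *			p += sizeof(*tr);
 *			handle_transaction(cmd, tr);
 *		}
 *	}
 */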
4337
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004338static void binder_release_work(struct binder_proc *proc,
4339 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004340{
4341 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09004342
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004343 while (1) {
4344 w = binder_dequeue_work_head(proc, list);
4345 if (!w)
4346 return;
4347
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004348 switch (w->type) {
4349 case BINDER_WORK_TRANSACTION: {
4350 struct binder_transaction *t;
4351
4352 t = container_of(w, struct binder_transaction, work);
Martijn Coenen3217ccc2017-08-24 15:23:36 +02004353
4354 binder_cleanup_transaction(t, "process died.",
4355 BR_DEAD_REPLY);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004356 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004357 case BINDER_WORK_RETURN_ERROR: {
4358 struct binder_error *e = container_of(
4359 w, struct binder_error, work);
4360
4361 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4362 "undelivered TRANSACTION_ERROR: %u\n",
4363 e->cmd);
4364 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004365 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004366 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304367 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004368 kfree(w);
4369 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4370 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004371 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4372 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4373 struct binder_ref_death *death;
4374
4375 death = container_of(w, struct binder_ref_death, work);
4376 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004377 "undelivered death notification, %016llx\n",
4378 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004379 kfree(death);
4380 binder_stats_deleted(BINDER_STAT_DEATH);
4381 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004382 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304383 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004384 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004385 break;
4386 }
4387 }
4388
4389}
4390
Todd Kjosb4827902017-05-25 15:52:17 -07004391static struct binder_thread *binder_get_thread_ilocked(
4392 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004393{
4394 struct binder_thread *thread = NULL;
4395 struct rb_node *parent = NULL;
4396 struct rb_node **p = &proc->threads.rb_node;
4397
4398 while (*p) {
4399 parent = *p;
4400 thread = rb_entry(parent, struct binder_thread, rb_node);
4401
4402 if (current->pid < thread->pid)
4403 p = &(*p)->rb_left;
4404 else if (current->pid > thread->pid)
4405 p = &(*p)->rb_right;
4406 else
Todd Kjosb4827902017-05-25 15:52:17 -07004407 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004408 }
Todd Kjosb4827902017-05-25 15:52:17 -07004409 if (!new_thread)
4410 return NULL;
4411 thread = new_thread;
4412 binder_stats_created(BINDER_STAT_THREAD);
4413 thread->proc = proc;
4414 thread->pid = current->pid;
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004415 get_task_struct(current);
4416 thread->task = current;
Todd Kjosb4827902017-05-25 15:52:17 -07004417 atomic_set(&thread->tmp_ref, 0);
4418 init_waitqueue_head(&thread->wait);
4419 INIT_LIST_HEAD(&thread->todo);
4420 rb_link_node(&thread->rb_node, parent, p);
4421 rb_insert_color(&thread->rb_node, &proc->threads);
4422 thread->looper_need_return = true;
4423 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4424 thread->return_error.cmd = BR_OK;
4425 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4426 thread->reply_error.cmd = BR_OK;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004427 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004428 return thread;
4429}
4430
4431static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4432{
4433 struct binder_thread *thread;
4434 struct binder_thread *new_thread;
4435
4436 binder_inner_proc_lock(proc);
4437 thread = binder_get_thread_ilocked(proc, NULL);
4438 binder_inner_proc_unlock(proc);
4439 if (!thread) {
4440 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4441 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004442 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07004443 binder_inner_proc_lock(proc);
4444 thread = binder_get_thread_ilocked(proc, new_thread);
4445 binder_inner_proc_unlock(proc);
4446 if (thread != new_thread)
4447 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004448 }
4449 return thread;
4450}
4451
Todd Kjos2f993e22017-05-12 14:42:55 -07004452static void binder_free_proc(struct binder_proc *proc)
4453{
4454 BUG_ON(!list_empty(&proc->todo));
4455 BUG_ON(!list_empty(&proc->delivered_death));
4456 binder_alloc_deferred_release(&proc->alloc);
4457 put_task_struct(proc->tsk);
4458 binder_stats_deleted(BINDER_STAT_PROC);
4459 kfree(proc);
4460}
4461
4462static void binder_free_thread(struct binder_thread *thread)
4463{
4464 BUG_ON(!list_empty(&thread->todo));
4465 binder_stats_deleted(BINDER_STAT_THREAD);
4466 binder_proc_dec_tmpref(thread->proc);
Martijn Coenen07a30fe2017-06-07 10:02:12 -07004467 put_task_struct(thread->task);
Todd Kjos2f993e22017-05-12 14:42:55 -07004468 kfree(thread);
4469}
4470
4471static int binder_thread_release(struct binder_proc *proc,
4472 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004473{
4474 struct binder_transaction *t;
4475 struct binder_transaction *send_reply = NULL;
4476 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07004477 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004478
Todd Kjosb4827902017-05-25 15:52:17 -07004479 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004480 /*
4481 * take a ref on the proc so it survives
4482 * after we remove this thread from proc->threads.
4483 * The corresponding dec is when we actually
4484 * free the thread in binder_free_thread()
4485 */
4486 proc->tmp_ref++;
4487 /*
4488 * take a ref on this thread to ensure it
4489 * survives while we are releasing it
4490 */
4491 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004492 rb_erase(&thread->rb_node, &proc->threads);
4493 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07004494 if (t) {
4495 spin_lock(&t->lock);
4496 if (t->to_thread == thread)
4497 send_reply = t;
4498 }
4499 thread->is_dead = true;
4500
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004501 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07004502 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004503 active_transactions++;
4504 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304505 "release %d:%d transaction %d %s, still active\n",
4506 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004507 t->debug_id,
4508 (t->to_thread == thread) ? "in" : "out");
4509
4510 if (t->to_thread == thread) {
4511 t->to_proc = NULL;
4512 t->to_thread = NULL;
4513 if (t->buffer) {
4514 t->buffer->transaction = NULL;
4515 t->buffer = NULL;
4516 }
4517 t = t->to_parent;
4518 } else if (t->from == thread) {
4519 t->from = NULL;
4520 t = t->from_parent;
4521 } else
4522 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07004523 spin_unlock(&last_t->lock);
4524 if (t)
4525 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004526 }
Todd Kjosb4827902017-05-25 15:52:17 -07004527 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004528
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004529 if (send_reply)
4530 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004531 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004532 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004533 return active_transactions;
4534}
4535
4536static unsigned int binder_poll(struct file *filp,
4537 struct poll_table_struct *wait)
4538{
4539 struct binder_proc *proc = filp->private_data;
4540 struct binder_thread *thread = NULL;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004541 bool wait_for_proc_work;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004542
	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004546 thread->looper |= BINDER_LOOPER_STATE_POLL;
4547 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4548
Martijn Coenen995a36e2017-06-02 13:36:52 -07004549 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004550
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004551 poll_wait(filp, &thread->wait, wait);
4552
Martijn Coenen47810932017-08-10 12:32:00 +02004553 if (binder_has_work(thread, wait_for_proc_work))
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004554 return POLLIN;
4555
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004556 return 0;
4557}
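/*
 * Illustrative userspace sketch: binder_poll() lets a thread multiplex
 * the binder fd with other fds; POLLIN means a subsequent
 * BINDER_WRITE_READ read would find work.  The drain helper is a
 * placeholder.
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_binder_commands(binder_fd);
 */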
4558
Tair Rzayev78260ac2014-06-03 22:27:21 +03004559static int binder_ioctl_write_read(struct file *filp,
4560 unsigned int cmd, unsigned long arg,
4561 struct binder_thread *thread)
4562{
4563 int ret = 0;
4564 struct binder_proc *proc = filp->private_data;
4565 unsigned int size = _IOC_SIZE(cmd);
4566 void __user *ubuf = (void __user *)arg;
4567 struct binder_write_read bwr;
4568
4569 if (size != sizeof(struct binder_write_read)) {
4570 ret = -EINVAL;
4571 goto out;
4572 }
4573 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4574 ret = -EFAULT;
4575 goto out;
4576 }
4577 binder_debug(BINDER_DEBUG_READ_WRITE,
4578 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4579 proc->pid, thread->pid,
4580 (u64)bwr.write_size, (u64)bwr.write_buffer,
4581 (u64)bwr.read_size, (u64)bwr.read_buffer);
4582
4583 if (bwr.write_size > 0) {
4584 ret = binder_thread_write(proc, thread,
4585 bwr.write_buffer,
4586 bwr.write_size,
4587 &bwr.write_consumed);
4588 trace_binder_write_done(ret);
4589 if (ret < 0) {
4590 bwr.read_consumed = 0;
4591 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4592 ret = -EFAULT;
4593 goto out;
4594 }
4595 }
4596 if (bwr.read_size > 0) {
4597 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4598 bwr.read_size,
4599 &bwr.read_consumed,
4600 filp->f_flags & O_NONBLOCK);
4601 trace_binder_read_done(ret);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004602 binder_inner_proc_lock(proc);
4603 if (!binder_worklist_empty_ilocked(&proc->todo))
Martijn Coenen053be422017-06-06 15:17:46 -07004604 binder_wakeup_proc_ilocked(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004605 binder_inner_proc_unlock(proc);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004606 if (ret < 0) {
4607 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4608 ret = -EFAULT;
4609 goto out;
4610 }
4611 }
4612 binder_debug(BINDER_DEBUG_READ_WRITE,
4613 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4614 proc->pid, thread->pid,
4615 (u64)bwr.write_consumed, (u64)bwr.write_size,
4616 (u64)bwr.read_consumed, (u64)bwr.read_size);
4617 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4618 ret = -EFAULT;
4619 goto out;
4620 }
4621out:
4622 return ret;
4623}
4624
4625static int binder_ioctl_set_ctx_mgr(struct file *filp)
4626{
4627 int ret = 0;
4628 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004629 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004630 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004631 kuid_t curr_euid = current_euid();
4632
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004633 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004634 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004635 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4636 ret = -EBUSY;
4637 goto out;
4638 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004639 ret = security_binder_set_context_mgr(proc->tsk);
4640 if (ret < 0)
4641 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004642 if (uid_valid(context->binder_context_mgr_uid)) {
4643 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004644 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4645 from_kuid(&init_user_ns, curr_euid),
4646 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004647 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004648 ret = -EPERM;
4649 goto out;
4650 }
4651 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004652 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004653 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004654 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004655 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004656 ret = -ENOMEM;
4657 goto out;
4658 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004659 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004660 new_node->local_weak_refs++;
4661 new_node->local_strong_refs++;
4662 new_node->has_strong_ref = 1;
4663 new_node->has_weak_ref = 1;
4664 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004665 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004666 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004667out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004668 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004669 return ret;
4670}
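/*
 * Illustrative userspace sketch: the context manager (servicemanager
 * on Android) claims the context-manager node once, right after
 * opening the device, and is afterwards reachable by all clients as
 * handle 0:
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		return -1;
 */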
4671
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
					    struct binder_node_debug_info *info)
{
4674 struct rb_node *n;
4675 binder_uintptr_t ptr = info->ptr;
4676
4677 memset(info, 0, sizeof(*info));
4678
4679 binder_inner_proc_lock(proc);
4680 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4681 struct binder_node *node = rb_entry(n, struct binder_node,
4682 rb_node);
4683 if (node->ptr > ptr) {
4684 info->ptr = node->ptr;
4685 info->cookie = node->cookie;
4686 info->has_strong_ref = node->has_strong_ref;
4687 info->has_weak_ref = node->has_weak_ref;
4688 break;
4689 }
4690 }
4691 binder_inner_proc_unlock(proc);
4692
4693 return 0;
4694}
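/*
 * Because the lookup above returns the first node with node->ptr
 * strictly greater than the passed-in ptr, userspace can enumerate a
 * process's nodes by starting at 0 and feeding each result back in;
 * a zero ptr coming back (from the memset above) ends the walk
 * (illustrative sketch):
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *	for (;;) {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (!info.ptr)
 *			break;
 *		printf("node %llx strong %u weak %u\n",
 *		       (unsigned long long)info.ptr,
 *		       info.has_strong_ref, info.has_weak_ref);
 *	}
 */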
4695
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004696static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4697{
4698 int ret;
4699 struct binder_proc *proc = filp->private_data;
4700 struct binder_thread *thread;
4701 unsigned int size = _IOC_SIZE(cmd);
4702 void __user *ubuf = (void __user *)arg;
4703
Tair Rzayev78260ac2014-06-03 22:27:21 +03004704 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4705 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004706
Sherry Yang435416b2017-06-22 14:37:45 -07004707 binder_selftest_alloc(&proc->alloc);
4708
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004709 trace_binder_ioctl(cmd, arg);
4710
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004711 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4712 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004713 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004714
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004715 thread = binder_get_thread(proc);
4716 if (thread == NULL) {
4717 ret = -ENOMEM;
4718 goto err;
4719 }
4720
4721 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004722 case BINDER_WRITE_READ:
4723 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4724 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004725 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004726 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004727 case BINDER_SET_MAX_THREADS: {
4728 int max_threads;
4729
4730 if (copy_from_user(&max_threads, ubuf,
4731 sizeof(max_threads))) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004732 ret = -EINVAL;
4733 goto err;
4734 }
Todd Kjosd600e902017-05-25 17:35:02 -07004735 binder_inner_proc_lock(proc);
4736 proc->max_threads = max_threads;
4737 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004738 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004739 }
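	/*
	 * Illustrative userspace sketch: runtimes set the looper-pool cap
	 * right after mmap (Android's libbinder defaults to 15).  The value
	 * only bounds BR_SPAWN_LOOPER requests; threads that enter on their
	 * own with BC_ENTER_LOOPER are not counted against it.
	 *
	 *	uint32_t max_threads = 15;
	 *	ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
	 */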
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004740 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004741 ret = binder_ioctl_set_ctx_mgr(filp);
4742 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004743 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004744 break;
4745 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304746 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004747 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004748 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004749 thread = NULL;
4750 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004751 case BINDER_VERSION: {
4752 struct binder_version __user *ver = ubuf;
4753
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004754 if (size != sizeof(struct binder_version)) {
4755 ret = -EINVAL;
4756 goto err;
4757 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004758 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4759 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004760 ret = -EINVAL;
4761 goto err;
4762 }
4763 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004764 }
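	/*
	 * Illustrative userspace sketch: clients check protocol
	 * compatibility once at startup and bail out on mismatch.
	 *
	 *	struct binder_version vers;
	 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
	 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
	 *		return -1;
	 */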
Colin Cross833babb32017-06-20 13:54:44 -07004765 case BINDER_GET_NODE_DEBUG_INFO: {
4766 struct binder_node_debug_info info;
4767
4768 if (copy_from_user(&info, ubuf, sizeof(info))) {
4769 ret = -EFAULT;
4770 goto err;
4771 }
4772
4773 ret = binder_ioctl_get_node_debug_info(proc, &info);
4774 if (ret < 0)
4775 goto err;
4776
4777 if (copy_to_user(ubuf, &info, sizeof(info))) {
4778 ret = -EFAULT;
4779 goto err;
4780 }
4781 break;
4782 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004783 default:
4784 ret = -EINVAL;
4785 goto err;
4786 }
4787 ret = 0;
4788err:
4789 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004790 thread->looper_need_return = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004791 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4792 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304793 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004794err_unlocked:
4795 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004796 return ret;
4797}
4798
4799static void binder_vma_open(struct vm_area_struct *vma)
4800{
4801 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004802
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004803 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304804 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004805 proc->pid, vma->vm_start, vma->vm_end,
4806 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4807 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004808}
4809
4810static void binder_vma_close(struct vm_area_struct *vma)
4811{
4812 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004813
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004814 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304815 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004816 proc->pid, vma->vm_start, vma->vm_end,
4817 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4818 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004819 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004820 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4821}
4822
Vinayak Menonddac7d52014-06-02 18:17:59 +05304823static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4824{
4825 return VM_FAULT_SIGBUS;
4826}
4827
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004828static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004829 .open = binder_vma_open,
4830 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304831 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004832};
4833
Todd Kjosd325d372016-10-10 10:40:53 -07004834static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4835{
4836 int ret;
4837 struct binder_proc *proc = filp->private_data;
4838 const char *failure_string;
4839
4840 if (proc->tsk != current->group_leader)
4841 return -EINVAL;
4842
4843 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4844 vma->vm_end = vma->vm_start + SZ_4M;
4845
4846 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4847 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4848 __func__, proc->pid, vma->vm_start, vma->vm_end,
4849 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4850 (unsigned long)pgprot_val(vma->vm_page_prot));
4851
4852 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4853 ret = -EPERM;
4854 failure_string = "bad vm_flags";
4855 goto err_bad_arg;
4856 }
4857 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4858 vma->vm_ops = &binder_vm_ops;
4859 vma->vm_private_data = proc;
4860
4861 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4862 if (ret)
4863 return ret;
4864 proc->files = get_files_struct(current);
4865 return 0;
4866
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004867err_bad_arg:
Sherwin Soltani258767f2012-06-26 02:00:30 -04004868 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004869 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4870 return ret;
4871}
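/*
 * Illustrative userspace sketch of device setup, matching the checks
 * above: the mapping must not be writable from userspace and is capped
 * at 4MB; the size below is a placeholder.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (fd < 0 || map == MAP_FAILED)
 *		return -1;
 */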
4872
4873static int binder_open(struct inode *nodp, struct file *filp)
4874{
4875 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004876 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004877
4878 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4879 current->group_leader->pid, current->pid);
4880
4881 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4882 if (proc == NULL)
4883 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004884 spin_lock_init(&proc->inner_lock);
4885 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004886 get_task_struct(current->group_leader);
4887 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004888 INIT_LIST_HEAD(&proc->todo);
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004889 if (binder_supported_policy(current->policy)) {
4890 proc->default_priority.sched_policy = current->policy;
4891 proc->default_priority.prio = current->normal_prio;
4892 } else {
4893 proc->default_priority.sched_policy = SCHED_NORMAL;
4894 proc->default_priority.prio = NICE_TO_PRIO(0);
4895 }
4896
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004897 binder_dev = container_of(filp->private_data, struct binder_device,
4898 miscdev);
4899 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07004900 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004901
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004902 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004903 proc->pid = current->group_leader->pid;
4904 INIT_LIST_HEAD(&proc->delivered_death);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004905 INIT_LIST_HEAD(&proc->waiting_threads);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004906 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004907
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004908 mutex_lock(&binder_procs_lock);
4909 hlist_add_head(&proc->proc_node, &binder_procs);
4910 mutex_unlock(&binder_procs_lock);
4911
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004912 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004913 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09004914
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004915 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * print all contexts that a given PID has anyway, so this
		 * is not a problem.
		 */
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004923 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004924 binder_debugfs_dir_entry_proc,
4925 (void *)(unsigned long)proc->pid,
4926 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004927 }
4928
4929 return 0;
4930}
4931
4932static int binder_flush(struct file *filp, fl_owner_t id)
4933{
4934 struct binder_proc *proc = filp->private_data;
4935
4936 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4937
4938 return 0;
4939}
4940
4941static void binder_deferred_flush(struct binder_proc *proc)
4942{
4943 struct rb_node *n;
4944 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09004945
Todd Kjosb4827902017-05-25 15:52:17 -07004946 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004947 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4948 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09004949
Todd Kjos6798e6d2017-01-06 14:19:25 -08004950 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004951 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4952 wake_up_interruptible(&thread->wait);
4953 wake_count++;
4954 }
4955 }
Todd Kjosb4827902017-05-25 15:52:17 -07004956 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004957
4958 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4959 "binder_flush: %d woke %d threads\n", proc->pid,
4960 wake_count);
4961}
4962
4963static int binder_release(struct inode *nodp, struct file *filp)
4964{
4965 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004966
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004967 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004968 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4969
4970 return 0;
4971}
4972
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004973static int binder_node_release(struct binder_node *node, int refs)
4974{
4975 struct binder_ref *ref;
4976 int death = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004977 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004978
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004979 binder_release_work(proc, &node->async_todo);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004980
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004981 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004982 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004983 binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
4987 BUG_ON(!node->tmp_refs);
4988 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004989 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004990 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004991 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004992
4993 return refs;
4994 }
4995
4996 node->proc = NULL;
4997 node->local_strong_refs = 0;
4998 node->local_weak_refs = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004999 binder_inner_proc_unlock(proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005000
5001 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005002 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005003 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005004
5005 hlist_for_each_entry(ref, &node->refs, node_entry) {
5006 refs++;
Martijn Coenenf9eac642017-05-22 11:26:23 -07005007 /*
5008 * Need the node lock to synchronize
5009 * with new notification requests and the
5010 * inner lock to synchronize with queued
5011 * death notifications.
5012 */
5013 binder_inner_proc_lock(ref->proc);
5014 if (!ref->death) {
5015 binder_inner_proc_unlock(ref->proc);
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08005016 continue;
Martijn Coenenf9eac642017-05-22 11:26:23 -07005017 }
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005018
5019 death++;
5020
Martijn Coenenf9eac642017-05-22 11:26:23 -07005021 BUG_ON(!list_empty(&ref->death->work.entry));
5022 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5023 binder_enqueue_work_ilocked(&ref->death->work,
5024 &ref->proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07005025 binder_wakeup_proc_ilocked(ref->proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005026 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005027 }
5028
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005029 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5030 "node %d now dead, refs %d, death %d\n",
5031 node->debug_id, refs, death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005032 binder_node_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07005033 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005034
5035 return refs;
5036}
5037
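/*
 * Deferred part of binder_release(): unpublish the proc, then tear
 * down its threads, nodes and refs in turn, dropping and retaking the
 * relevant lock around each step that may block. The temporary ref
 * taken up front keeps the proc alive until the final
 * binder_proc_dec_tmpref().
 */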
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

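/*
 * Workqueue handler: drains binder_deferred_list, applying whichever
 * PUT_FILES/FLUSH/RELEASE actions were recorded for each proc. The
 * files_struct is snapshotted first and only put at the end, since
 * BINDER_DEFERRED_RELEASE may free the proc itself.
 */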
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

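/*
 * Record a deferred action for @proc and kick the worker if the proc
 * is not already queued; repeated requests are OR-ed into
 * proc->deferred_work.
 */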
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

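/*
 * The remainder of this file implements the debugfs reporting. The
 * lock-suffix convention described at the top of the file applies to
 * these print helpers too: callers must already hold the named locks.
 */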
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

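/*
 * Expects node->lock and, while the node is still attached to a proc,
 * that proc's inner lock to be held (hence the _nilocked suffix);
 * dead nodes are printed with only node->lock held.
 */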
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

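/*
 * Human-readable names for the BR_ return codes, BC_ commands and
 * object stat buckets. Each table must stay in step with the
 * corresponding enum in the binder UAPI header; the BUILD_BUG_ON()s
 * in print_binder_stats() verify that the sizes still match.
 */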
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

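/*
 * Dump the command/return/object counters, omitting entries that are
 * still zero to keep the output compact.
 */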
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

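/*
 * Condensed per-proc view used by the debugfs "stats" file: counts of
 * threads, nodes, refs and buffers rather than a full object dump.
 */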
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

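/*
 * seq_file show handlers behind the debugfs entries created in
 * binder_init(). Note the temporary node references taken while
 * walking binder_dead_nodes so a node cannot be freed mid-print.
 */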
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

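/*
 * Log entries are written without locking; the writer updates
 * debug_id_done last. Sampling it before and after printing, with the
 * paired read barriers, lets a torn entry be flagged "(incomplete)"
 * rather than presented as valid.
 */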
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

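/*
 * Note: no ->read or ->write; all data transfer goes through the
 * BINDER_WRITE_READ ioctl. The same handler is wired up for the
 * compat path, the UAPI having been kept 32/64-bit clean.
 */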
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

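/*
 * Allocate and register a single binder misc device (e.g. /dev/binder)
 * and link it into binder_devices. Each device gets its own
 * binder_context, so every device has an independent context manager.
 */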
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

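/*
 * Module init: register the allocator shrinker, create the debugfs
 * tree, then register one misc device per name in
 * binder_devices_param. The list is comma-separated; on Android
 * builds it is typically "binder,hwbinder,vndbinder" (set via
 * CONFIG_ANDROID_BINDER_DEVICES).
 */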
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_alloc_shrinker_init();

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

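/* Instantiate the tracepoints declared in binder_trace.h. */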
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");