/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

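/*
 * Illustrative sketch (editor's addition, not compiled): how a function
 * would honor the lock order above when it needs both of a proc's locks.
 * The binder_proc_lock()/binder_inner_proc_lock() helpers are defined
 * later in this file.
 */
#if 0
static void example_oilocked(struct binder_proc *proc)
{
        binder_proc_lock(proc);         /* 1) proc->outer_lock */
        binder_inner_proc_lock(proc);   /* 3) proc->inner_lock */
        /* ... update refs and todo lists ... */
        binder_inner_proc_unlock(proc); /* release in reverse order */
        binder_proc_unlock(proc);
}
#endif
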
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

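/*
 * Editor's note: BINDER_DEBUG_ENTRY(proc) above expands to a
 * binder_proc_open() that wires binder_proc_show() into single_open(),
 * plus a binder_proc_fops that can be handed to debugfs_create_file().
 */
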
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
        BINDER_DEBUG_USER_ERROR = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
        BINDER_DEBUG_READ_WRITE = 1U << 6,
        BINDER_DEBUG_USER_REFS = 1U << 7,
        BINDER_DEBUG_THREADS = 1U << 8,
        BINDER_DEBUG_TRANSACTION = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
        BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)

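/*
 * Editor's note (illustrative): binder_debug_mask is a writable module
 * parameter, so individual debug classes can be toggled at run time,
 * e.g. to add transaction logging to the default mask:
 *
 *   echo 0x207 > /sys/module/binder/parameters/debug_mask
 *
 * 0x207 = USER_ERROR (1U << 0) | FAILED_TRANSACTION (1U << 1) |
 *         DEAD_TRANSACTION (1U << 2) | TRANSACTION (1U << 9).
 */
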
#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = 1;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}

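/*
 * Illustrative sketch (editor's addition, not compiled): the smp_wmb()
 * above pairs with an smp_rmb() on the reader side. A reader snapshots
 * debug_id_done, copies the entry, and trusts the copy only if
 * debug_id_done is still nonzero afterwards:
 */
#if 0
static bool example_read_log_entry(struct binder_transaction_log_entry *e,
                                   struct binder_transaction_log_entry *copy)
{
        int done = READ_ONCE(e->debug_id_done);

        smp_rmb();      /* pairs with smp_wmb() in binder_transaction_log_add() */
        *copy = *e;
        return done && done == READ_ONCE(e->debug_id_done);
}
#endif
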
struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 sched_policy:2;
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES = 0x01,
        BINDER_DEFERRED_FLUSH = 0x02,
        BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio:         [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
        unsigned int sched_policy;
        int prio;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        wait_queue_head_t wait;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        struct binder_priority default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED = 0x01,
        BINDER_LOOPER_STATE_ENTERED = 0x02,
        BINDER_LOOPER_STATE_EXITED = 0x04,
        BINDER_LOOPER_STATE_INVALID = 0x08,
        BINDER_LOOPER_STATE_WAITING = 0x10,
        BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        struct binder_priority priority;
        struct binder_priority saved_priority;
        kuid_t sender_euid;
        /**
         * @lock: protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc: struct binder_proc being unlocked
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc being unlocked
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node being unlocked
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node being unlocked
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
                    struct binder_work *work,
                    struct list_head *target_list)
{
        binder_inner_proc_lock(proc);
        binder_enqueue_work_ilocked(work, target_list);
        binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc: binder_proc associated with list
 * @list: list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}

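/*
 * Illustrative sketch (editor's addition, not compiled): the usual
 * pattern for the helpers above is to queue work and wake a consumer
 * in one inner-lock critical section:
 */
#if 0
static void example_post_work(struct binder_proc *proc,
                              struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_enqueue_work_ilocked(work, &proc->todo);
        /* ... wake a waiting thread, see binder_wakeup_proc_ilocked() ... */
        binder_inner_proc_unlock(proc);
}
#endif
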
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        struct files_struct *files = proc->files;
        unsigned long rlim_cur;
        unsigned long irqs;

        if (files == NULL)
                return -ESRCH;

        if (!lock_task_sighand(proc->tsk, &irqs))
                return -EMFILE;

        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        if (proc->files)
                __fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        if (proc->files == NULL)
                return -ESRCH;

        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;

        return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return !binder_worklist_empty_ilocked(&thread->todo) ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        BUG_ON(!spin_is_locked(&proc->inner_lock));
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:   process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync:   whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        BUG_ON(!spin_is_locked(&proc->inner_lock));

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
        return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
        return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
        return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
        if (is_fair_policy(policy))
                return PRIO_TO_NICE(kernel_priority);
        else
                return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
        if (is_fair_policy(policy))
                return NICE_TO_PRIO(user_priority);
        else
                return MAX_USER_RT_PRIO - 1 - user_priority;
}

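/*
 * Editor's note (illustrative): with MAX_USER_RT_PRIO == 100, the two
 * mappings above are inverses of each other, e.g.:
 *
 *   SCHED_NORMAL, nice 10 <-> kernel prio 130 (NICE_TO_PRIO(10))
 *   SCHED_FIFO, rtprio 50 <-> kernel prio 49  (100 - 1 - 50)
 */
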
static void binder_set_priority(struct task_struct *task,
                                struct binder_priority desired)
{
        int priority; /* user-space prio value */
        bool has_cap_nice;
        unsigned int policy = desired.sched_policy;

        if (task->policy == policy && task->normal_prio == desired.prio)
                return;

        has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

        priority = to_userspace_prio(policy, desired.prio);

        if (is_rt_policy(policy) && !has_cap_nice) {
                long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

                if (max_rtprio == 0) {
                        policy = SCHED_NORMAL;
                        priority = MIN_NICE;
                } else if (priority > max_rtprio) {
                        priority = max_rtprio;
                }
        }

        if (is_fair_policy(policy) && !has_cap_nice) {
                long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

                if (min_nice > MAX_NICE) {
                        binder_user_error("%d RLIMIT_NICE not set\n",
                                          task->pid);
                        return;
                } else if (priority < min_nice) {
                        priority = min_nice;
                }
        }

        if (policy != desired.sched_policy ||
            to_kernel_prio(policy, priority) != desired.prio)
                binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                             "%d: priority %d not allowed, using %d instead\n",
                             task->pid, desired.prio,
                             to_kernel_prio(policy, priority));

        /* Set the actual priority */
        if (task->policy != policy || is_rt_policy(policy)) {
                struct sched_param params;

                params.sched_priority = is_rt_policy(policy) ? priority : 0;

                sched_setscheduler_nocheck(task,
                                           policy | SCHED_RESET_ON_FORK,
                                           &params);
        }
        if (is_fair_policy(policy))
                set_user_nice(task, priority);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        BUG_ON(!spin_is_locked(&proc->inner_lock));

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}

static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;
        s8 priority;

        BUG_ON(!spin_is_locked(&proc->inner_lock));
        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
                FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
        node->min_priority = to_kernel_prio(node->sched_policy, priority);
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}

Todd Kjose7f23ed2017-03-21 13:06:01 -07001281static void binder_free_node(struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001282{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001283 kfree(node);
1284 binder_stats_deleted(BINDER_STAT_NODE);
1285}
1286
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001287static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1288 int internal,
1289 struct list_head *target_list)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001290{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001291 struct binder_proc *proc = node->proc;
1292
1293 BUG_ON(!spin_is_locked(&node->lock));
1294 if (proc)
1295 BUG_ON(!spin_is_locked(&proc->inner_lock));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001296 if (strong) {
1297 if (internal) {
1298 if (target_list == NULL &&
1299 node->internal_strong_refs == 0 &&
Martijn Coenen0b3311e2016-09-30 15:51:48 +02001300 !(node->proc &&
1301 node == node->proc->context->
1302 binder_context_mgr_node &&
1303 node->has_strong_ref)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301304 pr_err("invalid inc strong node for %d\n",
1305 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001306 return -EINVAL;
1307 }
1308 node->internal_strong_refs++;
1309 } else
1310 node->local_strong_refs++;
1311 if (!node->has_strong_ref && target_list) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001312 binder_dequeue_work_ilocked(&node->work);
1313 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001314 }
1315 } else {
1316 if (!internal)
1317 node->local_weak_refs++;
1318 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1319 if (target_list == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301320 pr_err("invalid inc weak node for %d\n",
1321 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001322 return -EINVAL;
1323 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001324 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001325 }
1326 }
1327 return 0;
1328}
1329
Todd Kjose7f23ed2017-03-21 13:06:01 -07001330static int binder_inc_node(struct binder_node *node, int strong, int internal,
1331 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001332{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001333 int ret;
1334
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001335 binder_node_inner_lock(node);
1336 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1337 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001338
1339 return ret;
1340}
1341
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001342static bool binder_dec_node_nilocked(struct binder_node *node,
1343 int strong, int internal)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001344{
1345 struct binder_proc *proc = node->proc;
1346
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001347 BUG_ON(!spin_is_locked(&node->lock));
Todd Kjose7f23ed2017-03-21 13:06:01 -07001348 if (proc)
1349 BUG_ON(!spin_is_locked(&proc->inner_lock));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001350 if (strong) {
1351 if (internal)
1352 node->internal_strong_refs--;
1353 else
1354 node->local_strong_refs--;
1355 if (node->local_strong_refs || node->internal_strong_refs)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001356 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001357 } else {
1358 if (!internal)
1359 node->local_weak_refs--;
Todd Kjosf22abc72017-05-09 11:08:05 -07001360 if (node->local_weak_refs || node->tmp_refs ||
1361 !hlist_empty(&node->refs))
Todd Kjose7f23ed2017-03-21 13:06:01 -07001362 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001363 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001364
1365 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001366 if (list_empty(&node->work.entry)) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001367 binder_enqueue_work_ilocked(&node->work, &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07001368 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001369 }
1370 } else {
1371 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
Todd Kjosf22abc72017-05-09 11:08:05 -07001372 !node->local_weak_refs && !node->tmp_refs) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07001373 if (proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001374 binder_dequeue_work_ilocked(&node->work);
1375 rb_erase(&node->rb_node, &proc->nodes);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001376 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301377 "refless node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001378 node->debug_id);
1379 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001380 BUG_ON(!list_empty(&node->work.entry));
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001381 spin_lock(&binder_dead_nodes_lock);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001382 /*
1383 * tmp_refs could have changed so
1384 * check it again
1385 */
1386 if (node->tmp_refs) {
1387 spin_unlock(&binder_dead_nodes_lock);
1388 return false;
1389 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001390 hlist_del(&node->dead_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07001391 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001392 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301393 "dead node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001394 node->debug_id);
1395 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001396 return true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001397 }
1398 }
Todd Kjose7f23ed2017-03-21 13:06:01 -07001399 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001400}
1401
Todd Kjose7f23ed2017-03-21 13:06:01 -07001402static void binder_dec_node(struct binder_node *node, int strong, int internal)
1403{
1404 bool free_node;
1405
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001406 binder_node_inner_lock(node);
1407 free_node = binder_dec_node_nilocked(node, strong, internal);
1408 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001409 if (free_node)
1410 binder_free_node(node);
1411}
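
/*
 * Note: binder_dec_node_nilocked() only reports that the node became
 * eligible for freeing; the kfree is deferred to callers such as
 * binder_dec_node() above, after the node locks are dropped, since
 * node->lock is embedded in the node itself and could not be released
 * once the node has been freed.
 */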
1412
1413static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
Todd Kjosf22abc72017-05-09 11:08:05 -07001414{
1415 /*
1416 * No call to binder_inc_node() is needed since we
1417 * don't need to inform userspace of any changes to
1418 * tmp_refs
1419 */
1420 node->tmp_refs++;
1421}
1422
1423/**
Todd Kjose7f23ed2017-03-21 13:06:01 -07001424 * binder_inc_node_tmpref() - take a temporary reference on node
1425 * @node: node to reference
1426 *
1427 * Take reference on node to prevent the node from being freed
1428 * while referenced only by a local variable. The inner lock is
1429 * needed to serialize with the node work on the queue (which
1430 * isn't needed after the node is dead). If the node is dead
1431 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1432 * node->tmp_refs against dead-node-only cases where the node
 1433 * lock cannot be acquired (e.g. traversing the dead node list to
1434 * print nodes)
1435 */
1436static void binder_inc_node_tmpref(struct binder_node *node)
1437{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001438 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001439 if (node->proc)
1440 binder_inner_proc_lock(node->proc);
1441 else
1442 spin_lock(&binder_dead_nodes_lock);
1443 binder_inc_node_tmpref_ilocked(node);
1444 if (node->proc)
1445 binder_inner_proc_unlock(node->proc);
1446 else
1447 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001448 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001449}
1450
1451/**
Todd Kjosf22abc72017-05-09 11:08:05 -07001452 * binder_dec_node_tmpref() - remove a temporary reference on node
 1453 * @node: node whose temporary reference is being released
1454 *
1455 * Release temporary reference on node taken via binder_inc_node_tmpref()
1456 */
1457static void binder_dec_node_tmpref(struct binder_node *node)
1458{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001459 bool free_node;
1460
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001461 binder_node_inner_lock(node);
1462 if (!node->proc)
Todd Kjose7f23ed2017-03-21 13:06:01 -07001463 spin_lock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001464 node->tmp_refs--;
1465 BUG_ON(node->tmp_refs < 0);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001466 if (!node->proc)
1467 spin_unlock(&binder_dead_nodes_lock);
Todd Kjosf22abc72017-05-09 11:08:05 -07001468 /*
1469 * Call binder_dec_node() to check if all refcounts are 0
1470 * and cleanup is needed. Calling with strong=0 and internal=1
1471 * causes no actual reference to be released in binder_dec_node().
1472 * If that changes, a change is needed here too.
1473 */
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001474 free_node = binder_dec_node_nilocked(node, 0, 1);
1475 binder_node_inner_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001476 if (free_node)
1477 binder_free_node(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07001478}
1479
1480static void binder_put_node(struct binder_node *node)
1481{
1482 binder_dec_node_tmpref(node);
1483}
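
/*
 * Illustrative usage sketch (not part of the driver): a temporary
 * reference pins a node that is otherwise reachable only through a
 * local variable, e.g. when resolving a handle:
 *
 *	struct binder_ref_data rdata;
 *	struct binder_node *node;
 *
 *	node = binder_get_node_from_ref(proc, desc, true, &rdata);
 *	if (!node)
 *		return -EINVAL;
 *	...use node; it cannot be freed while tmp_refs is elevated...
 *	binder_put_node(node);
 */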
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001484
Todd Kjos5346bf32016-10-20 16:43:34 -07001485static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1486 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001487{
1488 struct rb_node *n = proc->refs_by_desc.rb_node;
1489 struct binder_ref *ref;
1490
1491 while (n) {
1492 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1493
Todd Kjosb0117bb2017-05-08 09:16:27 -07001494 if (desc < ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001495 n = n->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001496 } else if (desc > ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001497 n = n->rb_right;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001498 } else if (need_strong_ref && !ref->data.strong) {
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001499 binder_user_error("tried to use weak ref as strong ref\n");
1500 return NULL;
1501 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001502 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001503 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001504 }
1505 return NULL;
1506}
1507
Todd Kjosb0117bb2017-05-08 09:16:27 -07001508/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001509 * binder_get_ref_for_node_olocked() - get the ref associated with given node
Todd Kjosb0117bb2017-05-08 09:16:27 -07001510 * @proc: binder_proc that owns the ref
1511 * @node: binder_node of target
1512 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1513 *
1514 * Look up the ref for the given node and return it if it exists
1515 *
1516 * If it doesn't exist and the caller provides a newly allocated
1517 * ref, initialize the fields of the newly allocated ref and insert
1518 * into the given proc rb_trees and node refs list.
1519 *
1520 * Return: the ref for node. It is possible that another thread
1521 * allocated/initialized the ref first in which case the
1522 * returned ref would be different than the passed-in
1523 * new_ref. new_ref must be kfree'd by the caller in
1524 * this case.
1525 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001526static struct binder_ref *binder_get_ref_for_node_olocked(
1527 struct binder_proc *proc,
1528 struct binder_node *node,
1529 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001530{
Todd Kjosb0117bb2017-05-08 09:16:27 -07001531 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001532 struct rb_node **p = &proc->refs_by_node.rb_node;
1533 struct rb_node *parent = NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001534 struct binder_ref *ref;
1535 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001536
1537 while (*p) {
1538 parent = *p;
1539 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1540
1541 if (node < ref->node)
1542 p = &(*p)->rb_left;
1543 else if (node > ref->node)
1544 p = &(*p)->rb_right;
1545 else
1546 return ref;
1547 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001548 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001549 return NULL;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001550
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001551 binder_stats_created(BINDER_STAT_REF);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001552 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001553 new_ref->proc = proc;
1554 new_ref->node = node;
1555 rb_link_node(&new_ref->rb_node_node, parent, p);
1556 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1557
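	/*
	 * Descriptor 0 is reserved for the context manager node; every
	 * other ref gets the lowest unused descriptor, found by walking
	 * refs_by_desc in ascending order until the first gap.
	 */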
Todd Kjosb0117bb2017-05-08 09:16:27 -07001558 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001559 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1560 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001561 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001562 break;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001563 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001564 }
1565
1566 p = &proc->refs_by_desc.rb_node;
1567 while (*p) {
1568 parent = *p;
1569 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1570
Todd Kjosb0117bb2017-05-08 09:16:27 -07001571 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001572 p = &(*p)->rb_left;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001573 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001574 p = &(*p)->rb_right;
1575 else
1576 BUG();
1577 }
1578 rb_link_node(&new_ref->rb_node_desc, parent, p);
1579 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001580
1581 binder_node_lock(node);
Todd Kjos4cbe5752017-05-01 17:21:51 -07001582 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001583
Todd Kjos4cbe5752017-05-01 17:21:51 -07001584 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1585 "%d new ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001586 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjos4cbe5752017-05-01 17:21:51 -07001587 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001588 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001589 return new_ref;
1590}
1591
Todd Kjos5346bf32016-10-20 16:43:34 -07001592static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001593{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001594 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001595
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001596 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301597 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001598 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301599 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001600
1601 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1602 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001603
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001604 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001605 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001606 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001607
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001608 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001609 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1610 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001611 /*
1612 * Clear ref->node unless we want the caller to free the node
1613 */
1614 if (!delete_node) {
1615 /*
1616 * The caller uses ref->node to determine
1617 * whether the node needs to be freed. Clear
1618 * it since the node is still alive.
1619 */
1620 ref->node = NULL;
1621 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001622
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001623 if (ref->death) {
1624 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301625 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001626 ref->proc->pid, ref->data.debug_id,
1627 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001628 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001629 binder_stats_deleted(BINDER_STAT_DEATH);
1630 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001631 binder_stats_deleted(BINDER_STAT_REF);
1632}
1633
Todd Kjosb0117bb2017-05-08 09:16:27 -07001634/**
Todd Kjos5346bf32016-10-20 16:43:34 -07001635 * binder_inc_ref_olocked() - increment the given ref
Todd Kjosb0117bb2017-05-08 09:16:27 -07001636 * @ref: ref to be incremented
1637 * @strong: if true, strong increment, else weak
1638 * @target_list: list to queue node work on
1639 *
Todd Kjos5346bf32016-10-20 16:43:34 -07001640 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjosb0117bb2017-05-08 09:16:27 -07001641 *
1642 * Return: 0, if successful, else errno
1643 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001644static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1645 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001646{
1647 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001648
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001649 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001650 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001651 ret = binder_inc_node(ref->node, 1, 1, target_list);
1652 if (ret)
1653 return ret;
1654 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001655 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001656 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001657 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001658 ret = binder_inc_node(ref->node, 0, 1, target_list);
1659 if (ret)
1660 return ret;
1661 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001662 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001663 }
1664 return 0;
1665}
1666
Todd Kjosb0117bb2017-05-08 09:16:27 -07001667/**
 1668 * binder_dec_ref_olocked() - decrement the given ref
 1669 * @ref: ref to be decremented
 1670 * @strong: if true, strong decrement, else weak
 1671 *
 1672 * Decrement the ref. @ref->proc->outer_lock must be held on entry
1673 *
Todd Kjosb0117bb2017-05-08 09:16:27 -07001674 * Return: true if ref is cleaned up and ready to be freed
1675 */
Todd Kjos5346bf32016-10-20 16:43:34 -07001676static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001677{
1678 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001679 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301680 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001681 ref->proc->pid, ref->data.debug_id,
1682 ref->data.desc, ref->data.strong,
1683 ref->data.weak);
1684 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001685 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001686 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001687 if (ref->data.strong == 0)
1688 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001689 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001690 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301691 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001692 ref->proc->pid, ref->data.debug_id,
1693 ref->data.desc, ref->data.strong,
1694 ref->data.weak);
1695 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001696 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001697 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001698 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001699 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001700 binder_cleanup_ref_olocked(ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001701 return true;
1702 }
1703 return false;
1704}
1705
1706/**
1707 * binder_get_node_from_ref() - get the node from the given proc/desc
1708 * @proc: proc containing the ref
1709 * @desc: the handle associated with the ref
1710 * @need_strong_ref: if true, only return node if ref is strong
1711 * @rdata: the id/refcount data for the ref
1712 *
1713 * Given a proc and ref handle, return the associated binder_node
1714 *
 1715 * Return: a binder_node, or NULL if the ref was not found or was weak when a strong ref was required
1716 */
1717static struct binder_node *binder_get_node_from_ref(
1718 struct binder_proc *proc,
1719 u32 desc, bool need_strong_ref,
1720 struct binder_ref_data *rdata)
1721{
1722 struct binder_node *node;
1723 struct binder_ref *ref;
1724
Todd Kjos5346bf32016-10-20 16:43:34 -07001725 binder_proc_lock(proc);
1726 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001727 if (!ref)
1728 goto err_no_ref;
1729 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001730 /*
1731 * Take an implicit reference on the node to ensure
1732 * it stays alive until the call to binder_put_node()
1733 */
1734 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001735 if (rdata)
1736 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001737 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001738
1739 return node;
1740
1741err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001742 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001743 return NULL;
1744}
1745
1746/**
1747 * binder_free_ref() - free the binder_ref
1748 * @ref: ref to free
1749 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001750 * Free the binder_ref. Free the binder_node indicated by ref->node
1751 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001752 */
1753static void binder_free_ref(struct binder_ref *ref)
1754{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001755 if (ref->node)
1756 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001757 kfree(ref->death);
1758 kfree(ref);
1759}
1760
1761/**
1762 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1763 * @proc: proc containing the ref
1764 * @desc: the handle associated with the ref
1765 * @increment: true=inc reference, false=dec reference
1766 * @strong: true=strong reference, false=weak reference
1767 * @rdata: the id/refcount data for the ref
1768 *
1769 * Given a proc and ref handle, increment or decrement the ref
1770 * according to "increment" arg.
1771 *
1772 * Return: 0 if successful, else errno
1773 */
1774static int binder_update_ref_for_handle(struct binder_proc *proc,
1775 uint32_t desc, bool increment, bool strong,
1776 struct binder_ref_data *rdata)
1777{
1778 int ret = 0;
1779 struct binder_ref *ref;
1780 bool delete_ref = false;
1781
Todd Kjos5346bf32016-10-20 16:43:34 -07001782 binder_proc_lock(proc);
1783 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001784 if (!ref) {
1785 ret = -EINVAL;
1786 goto err_no_ref;
1787 }
1788 if (increment)
Todd Kjos5346bf32016-10-20 16:43:34 -07001789 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001790 else
Todd Kjos5346bf32016-10-20 16:43:34 -07001791 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001792
1793 if (rdata)
1794 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001795 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001796
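	/*
	 * The ref is freed only after the proc lock is dropped; note that
	 * binder_free_ref() may also free the ref's node and any pending
	 * death notification.
	 */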
1797 if (delete_ref)
1798 binder_free_ref(ref);
1799 return ret;
1800
1801err_no_ref:
Todd Kjos5346bf32016-10-20 16:43:34 -07001802 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001803 return ret;
1804}
1805
1806/**
1807 * binder_dec_ref_for_handle() - dec the ref for given handle
1808 * @proc: proc containing the ref
1809 * @desc: the handle associated with the ref
1810 * @strong: true=strong reference, false=weak reference
1811 * @rdata: the id/refcount data for the ref
1812 *
1813 * Just calls binder_update_ref_for_handle() to decrement the ref.
1814 *
1815 * Return: 0 if successful, else errno
1816 */
1817static int binder_dec_ref_for_handle(struct binder_proc *proc,
1818 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1819{
1820 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1821}
1822
1823
1824/**
1825 * binder_inc_ref_for_node() - increment the ref for given proc/node
1826 * @proc: proc containing the ref
1827 * @node: target node
1828 * @strong: true=strong reference, false=weak reference
1829 * @target_list: worklist to use if node is incremented
1830 * @rdata: the id/refcount data for the ref
1831 *
1832 * Given a proc and node, increment the ref. Create the ref if it
1833 * doesn't already exist
1834 *
1835 * Return: 0 if successful, else errno
1836 */
1837static int binder_inc_ref_for_node(struct binder_proc *proc,
1838 struct binder_node *node,
1839 bool strong,
1840 struct list_head *target_list,
1841 struct binder_ref_data *rdata)
1842{
1843 struct binder_ref *ref;
1844 struct binder_ref *new_ref = NULL;
1845 int ret = 0;
1846
Todd Kjos5346bf32016-10-20 16:43:34 -07001847 binder_proc_lock(proc);
1848 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001849 if (!ref) {
Todd Kjos5346bf32016-10-20 16:43:34 -07001850 binder_proc_unlock(proc);
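		/*
		 * Drop the spinlock across the GFP_KERNEL allocation, which
		 * may sleep, then retry the lookup under the lock: another
		 * thread may have created the ref in the meantime (see the
		 * kernel-doc for binder_get_ref_for_node_olocked()).
		 */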
Todd Kjosb0117bb2017-05-08 09:16:27 -07001851 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1852 if (!new_ref)
1853 return -ENOMEM;
Todd Kjos5346bf32016-10-20 16:43:34 -07001854 binder_proc_lock(proc);
1855 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001856 }
Todd Kjos5346bf32016-10-20 16:43:34 -07001857 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001858 *rdata = ref->data;
Todd Kjos5346bf32016-10-20 16:43:34 -07001859 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001860 if (new_ref && ref != new_ref)
1861 /*
1862 * Another thread created the ref first so
1863 * free the one we allocated
1864 */
1865 kfree(new_ref);
1866 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001867}
1868
Martijn Coenen995a36e2017-06-02 13:36:52 -07001869static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1870 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001871{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001872 BUG_ON(!target_thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07001873 BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
Todd Kjos21ef40a2017-03-30 18:02:13 -07001874 BUG_ON(target_thread->transaction_stack != t);
1875 BUG_ON(target_thread->transaction_stack->from != target_thread);
1876 target_thread->transaction_stack =
1877 target_thread->transaction_stack->from_parent;
1878 t->from = NULL;
1879}
1880
Todd Kjos2f993e22017-05-12 14:42:55 -07001881/**
1882 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1883 * @thread: thread to decrement
1884 *
1885 * A thread needs to be kept alive while being used to create or
1886 * handle a transaction. binder_get_txn_from() is used to safely
1887 * extract t->from from a binder_transaction and keep the thread
1888 * indicated by t->from from being freed. When done with that
1889 * binder_thread, this function is called to decrement the
1890 * tmp_ref and free if appropriate (thread has been released
1891 * and no transaction being processed by the driver)
1892 */
1893static void binder_thread_dec_tmpref(struct binder_thread *thread)
1894{
1895 /*
 1896	 * the atomic alone protects the counter only while it cannot reach
 1897	 * zero or thread->is_dead is false; otherwise the inner lock is needed
Todd Kjos2f993e22017-05-12 14:42:55 -07001898 */
Todd Kjosb4827902017-05-25 15:52:17 -07001899 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001900 atomic_dec(&thread->tmp_ref);
1901 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07001902 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001903 binder_free_thread(thread);
1904 return;
1905 }
Todd Kjosb4827902017-05-25 15:52:17 -07001906 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001907}
1908
1909/**
1910 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1911 * @proc: proc to decrement
1912 *
1913 * A binder_proc needs to be kept alive while being used to create or
1914 * handle a transaction. proc->tmp_ref is incremented when
1915 * creating a new transaction or the binder_proc is currently in-use
1916 * by threads that are being released. When done with the binder_proc,
1917 * this function is called to decrement the counter and free the
1918 * proc if appropriate (proc has been released, all threads have
 1919 * been released and not currently in-use to process a transaction).
1920 */
1921static void binder_proc_dec_tmpref(struct binder_proc *proc)
1922{
Todd Kjosb4827902017-05-25 15:52:17 -07001923 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001924 proc->tmp_ref--;
1925 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1926 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07001927 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001928 binder_free_proc(proc);
1929 return;
1930 }
Todd Kjosb4827902017-05-25 15:52:17 -07001931 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001932}
1933
1934/**
1935 * binder_get_txn_from() - safely extract the "from" thread in transaction
1936 * @t: binder transaction for t->from
1937 *
1938 * Atomically return the "from" thread and increment the tmp_ref
1939 * count for the thread to ensure it stays alive until
1940 * binder_thread_dec_tmpref() is called.
1941 *
1942 * Return: the value of t->from
1943 */
1944static struct binder_thread *binder_get_txn_from(
1945 struct binder_transaction *t)
1946{
1947 struct binder_thread *from;
1948
1949 spin_lock(&t->lock);
1950 from = t->from;
1951 if (from)
1952 atomic_inc(&from->tmp_ref);
1953 spin_unlock(&t->lock);
1954 return from;
1955}
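
/*
 * Illustrative pairing (not part of the driver): each successful
 * binder_get_txn_from() must be balanced with a call to
 * binder_thread_dec_tmpref() once the thread is no longer used:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...from cannot be freed while the tmp_ref is held...
 *		binder_thread_dec_tmpref(from);
 *	}
 */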
1956
Martijn Coenen995a36e2017-06-02 13:36:52 -07001957/**
1958 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1959 * @t: binder transaction for t->from
1960 *
1961 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1962 * to guarantee that the thread cannot be released while operating on it.
1963 * The caller must call binder_inner_proc_unlock() to release the inner lock
 1964 * as well as call binder_thread_dec_tmpref() to release the reference.
1965 *
1966 * Return: the value of t->from
1967 */
1968static struct binder_thread *binder_get_txn_from_and_acq_inner(
1969 struct binder_transaction *t)
1970{
1971 struct binder_thread *from;
1972
1973 from = binder_get_txn_from(t);
1974 if (!from)
1975 return NULL;
1976 binder_inner_proc_lock(from->proc);
1977 if (t->from) {
1978 BUG_ON(from != t->from);
1979 return from;
1980 }
1981 binder_inner_proc_unlock(from->proc);
1982 binder_thread_dec_tmpref(from);
1983 return NULL;
1984}
1985
Todd Kjos21ef40a2017-03-30 18:02:13 -07001986static void binder_free_transaction(struct binder_transaction *t)
1987{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001988 if (t->buffer)
1989 t->buffer->transaction = NULL;
1990 kfree(t);
1991 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1992}
1993
1994static void binder_send_failed_reply(struct binder_transaction *t,
1995 uint32_t error_code)
1996{
1997 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001998 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09001999
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002000 BUG_ON(t->flags & TF_ONE_WAY);
2001 while (1) {
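		/*
		 * Each pass tries to deliver the error to the thread that
		 * originated t; when that thread is gone, the code below
		 * moves to t->from_parent and retries one level up the
		 * transaction stack.
		 */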
Martijn Coenen995a36e2017-06-02 13:36:52 -07002002 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002003 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002004 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2005 "send failed reply for transaction %d to %d:%d\n",
2006 t->debug_id,
2007 target_thread->proc->pid,
2008 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002009
Martijn Coenen995a36e2017-06-02 13:36:52 -07002010 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos858b8da2017-04-21 17:35:12 -07002011 if (target_thread->reply_error.cmd == BR_OK) {
2012 target_thread->reply_error.cmd = error_code;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002013 binder_enqueue_work_ilocked(
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002014 &target_thread->reply_error.work,
Todd Kjos858b8da2017-04-21 17:35:12 -07002015 &target_thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002016 wake_up_interruptible(&target_thread->wait);
2017 } else {
Todd Kjos858b8da2017-04-21 17:35:12 -07002018 WARN(1, "Unexpected reply error: %u\n",
2019 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002020 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002021 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002022 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07002023 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002024 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002025 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002026 next = t->from_parent;
2027
2028 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2029 "send failed reply for transaction %d, target dead\n",
2030 t->debug_id);
2031
Todd Kjos21ef40a2017-03-30 18:02:13 -07002032 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002033 if (next == NULL) {
2034 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2035 "reply failed, no target thread at root\n");
2036 return;
2037 }
2038 t = next;
2039 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2040 "reply failed, no target thread -- retry %d\n",
2041 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002042 }
2043}
2044
Martijn Coenen00c80372016-07-13 12:06:49 +02002045/**
2046 * binder_validate_object() - checks for a valid metadata object in a buffer.
2047 * @buffer: binder_buffer that we're parsing.
2048 * @offset: offset in the buffer at which to validate an object.
2049 *
2050 * Return: If there's a valid metadata object at @offset in @buffer, the
2051 * size of that object. Otherwise, it returns zero.
2052 */
2053static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2054{
2055 /* Check if we can read a header first */
2056 struct binder_object_header *hdr;
2057 size_t object_size = 0;
2058
2059 if (offset > buffer->data_size - sizeof(*hdr) ||
2060 buffer->data_size < sizeof(*hdr) ||
2061 !IS_ALIGNED(offset, sizeof(u32)))
2062 return 0;
2063
2064 /* Ok, now see if we can read a complete object. */
2065 hdr = (struct binder_object_header *)(buffer->data + offset);
2066 switch (hdr->type) {
2067 case BINDER_TYPE_BINDER:
2068 case BINDER_TYPE_WEAK_BINDER:
2069 case BINDER_TYPE_HANDLE:
2070 case BINDER_TYPE_WEAK_HANDLE:
2071 object_size = sizeof(struct flat_binder_object);
2072 break;
2073 case BINDER_TYPE_FD:
2074 object_size = sizeof(struct binder_fd_object);
2075 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002076 case BINDER_TYPE_PTR:
2077 object_size = sizeof(struct binder_buffer_object);
2078 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002079 case BINDER_TYPE_FDA:
2080 object_size = sizeof(struct binder_fd_array_object);
2081 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02002082 default:
2083 return 0;
2084 }
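	/*
	 * Both clauses below are needed: the second one guards the
	 * unsigned subtraction in the first from wrapping when the
	 * buffer is smaller than the object.
	 */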
2085 if (offset <= buffer->data_size - object_size &&
2086 buffer->data_size >= object_size)
2087 return object_size;
2088 else
2089 return 0;
2090}
2091
Martijn Coenen5a6da532016-09-30 14:10:07 +02002092/**
2093 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2094 * @b: binder_buffer containing the object
2095 * @index: index in offset array at which the binder_buffer_object is
2096 * located
2097 * @start: points to the start of the offset array
2098 * @num_valid: the number of valid offsets in the offset array
2099 *
2100 * Return: If @index is within the valid range of the offset array
2101 * described by @start and @num_valid, and if there's a valid
2102 * binder_buffer_object at the offset found in index @index
2103 * of the offset array, that object is returned. Otherwise,
2104 * %NULL is returned.
2105 * Note that the offset found in index @index itself is not
2106 * verified; this function assumes that @num_valid elements
2107 * from @start were previously verified to have valid offsets.
2108 */
2109static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2110 binder_size_t index,
2111 binder_size_t *start,
2112 binder_size_t num_valid)
2113{
2114 struct binder_buffer_object *buffer_obj;
2115 binder_size_t *offp;
2116
2117 if (index >= num_valid)
2118 return NULL;
2119
2120 offp = start + index;
2121 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2122 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2123 return NULL;
2124
2125 return buffer_obj;
2126}
2127
2128/**
2129 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2130 * @b: transaction buffer
 2131 * @objects_start: start of objects buffer
 2132 * @buffer: binder_buffer_object in which the fixup is done
 2133 * @fixup_offset: start offset in @buffer to fix up
2134 * @last_obj: last binder_buffer_object that we fixed up in
2135 * @last_min_offset: minimum fixup offset in @last_obj
2136 *
2137 * Return: %true if a fixup in buffer @buffer at offset @offset is
2138 * allowed.
2139 *
2140 * For safety reasons, we only allow fixups inside a buffer to happen
2141 * at increasing offsets; additionally, we only allow fixup on the last
2142 * buffer object that was verified, or one of its parents.
2143 *
2144 * Example of what is allowed:
2145 *
2146 * A
2147 * B (parent = A, offset = 0)
2148 * C (parent = A, offset = 16)
2149 * D (parent = C, offset = 0)
2150 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2151 *
2152 * Examples of what is not allowed:
2153 *
2154 * Decreasing offsets within the same parent:
2155 * A
2156 * C (parent = A, offset = 16)
2157 * B (parent = A, offset = 0) // decreasing offset within A
2158 *
2159 * Referring to a parent that wasn't the last object or any of its parents:
2160 * A
2161 * B (parent = A, offset = 0)
2162 * C (parent = A, offset = 0)
2163 * C (parent = A, offset = 16)
2164 * D (parent = B, offset = 0) // B is not A or any of A's parents
2165 */
2166static bool binder_validate_fixup(struct binder_buffer *b,
2167 binder_size_t *objects_start,
2168 struct binder_buffer_object *buffer,
2169 binder_size_t fixup_offset,
2170 struct binder_buffer_object *last_obj,
2171 binder_size_t last_min_offset)
2172{
2173 if (!last_obj) {
 2174 /* No previously-verified object to fix up in */
2175 return false;
2176 }
2177
2178 while (last_obj != buffer) {
2179 /*
2180 * Safe to retrieve the parent of last_obj, since it
2181 * was already previously verified by the driver.
2182 */
2183 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2184 return false;
2185 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2186 last_obj = (struct binder_buffer_object *)
2187 (b->data + *(objects_start + last_obj->parent));
2188 }
2189 return (fixup_offset >= last_min_offset);
2190}
2191
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002192static void binder_transaction_buffer_release(struct binder_proc *proc,
2193 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002194 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002195{
Martijn Coenen5a6da532016-09-30 14:10:07 +02002196 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002197 int debug_id = buffer->debug_id;
2198
2199 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302200 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002201 proc->pid, buffer->debug_id,
2202 buffer->data_size, buffer->offsets_size, failed_at);
2203
2204 if (buffer->target_node)
2205 binder_dec_node(buffer->target_node, 1, 0);
2206
Martijn Coenen5a6da532016-09-30 14:10:07 +02002207 off_start = (binder_size_t *)(buffer->data +
2208 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002209 if (failed_at)
2210 off_end = failed_at;
2211 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02002212 off_end = (void *)off_start + buffer->offsets_size;
2213 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002214 struct binder_object_header *hdr;
2215 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002216
Martijn Coenen00c80372016-07-13 12:06:49 +02002217 if (object_size == 0) {
2218 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002219 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002220 continue;
2221 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002222 hdr = (struct binder_object_header *)(buffer->data + *offp);
2223 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002224 case BINDER_TYPE_BINDER:
2225 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002226 struct flat_binder_object *fp;
2227 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002228
Martijn Coenen00c80372016-07-13 12:06:49 +02002229 fp = to_flat_binder_object(hdr);
2230 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002231 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002232 pr_err("transaction release %d bad node %016llx\n",
2233 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002234 break;
2235 }
2236 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002237 " node %d u%016llx\n",
2238 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02002239 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2240 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07002241 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002242 } break;
2243 case BINDER_TYPE_HANDLE:
2244 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002245 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002246 struct binder_ref_data rdata;
2247 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002248
Martijn Coenen00c80372016-07-13 12:06:49 +02002249 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002250 ret = binder_dec_ref_for_handle(proc, fp->handle,
2251 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2252
2253 if (ret) {
2254 pr_err("transaction release %d bad handle %d, ret = %d\n",
2255 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002256 break;
2257 }
2258 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002259 " ref %d desc %d\n",
2260 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002261 } break;
2262
Martijn Coenen00c80372016-07-13 12:06:49 +02002263 case BINDER_TYPE_FD: {
2264 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2265
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002266 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002267 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002268 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002269 task_close_fd(proc, fp->fd);
2270 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002271 case BINDER_TYPE_PTR:
2272 /*
2273 * Nothing to do here, this will get cleaned up when the
2274 * transaction buffer gets freed
2275 */
2276 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002277 case BINDER_TYPE_FDA: {
2278 struct binder_fd_array_object *fda;
2279 struct binder_buffer_object *parent;
2280 uintptr_t parent_buffer;
2281 u32 *fd_array;
2282 size_t fd_index;
2283 binder_size_t fd_buf_size;
2284
2285 fda = to_binder_fd_array_object(hdr);
2286 parent = binder_validate_ptr(buffer, fda->parent,
2287 off_start,
2288 offp - off_start);
2289 if (!parent) {
2290 pr_err("transaction release %d bad parent offset",
2291 debug_id);
2292 continue;
2293 }
2294 /*
2295 * Since the parent was already fixed up, convert it
2296 * back to kernel address space to access it
2297 */
2298 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002299 binder_alloc_get_user_buffer_offset(
2300 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002301
2302 fd_buf_size = sizeof(u32) * fda->num_fds;
2303 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2304 pr_err("transaction release %d invalid number of fds (%lld)\n",
2305 debug_id, (u64)fda->num_fds);
2306 continue;
2307 }
2308 if (fd_buf_size > parent->length ||
2309 fda->parent_offset > parent->length - fd_buf_size) {
2310 /* No space for all file descriptors here. */
2311 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2312 debug_id, (u64)fda->num_fds);
2313 continue;
2314 }
2315 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2316 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2317 task_close_fd(proc, fd_array[fd_index]);
2318 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002319 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002320 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002321 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002322 break;
2323 }
2324 }
2325}
2326
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002327static int binder_translate_binder(struct flat_binder_object *fp,
2328 struct binder_transaction *t,
2329 struct binder_thread *thread)
2330{
2331 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002332 struct binder_proc *proc = thread->proc;
2333 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002334 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002335 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002336
2337 node = binder_get_node(proc, fp->binder);
2338 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002339 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002340 if (!node)
2341 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002342 }
2343 if (fp->cookie != node->cookie) {
2344 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2345 proc->pid, thread->pid, (u64)fp->binder,
2346 node->debug_id, (u64)fp->cookie,
2347 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002348 ret = -EINVAL;
2349 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002350 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002351 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2352 ret = -EPERM;
2353 goto done;
2354 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002355
Todd Kjosb0117bb2017-05-08 09:16:27 -07002356 ret = binder_inc_ref_for_node(target_proc, node,
2357 fp->hdr.type == BINDER_TYPE_BINDER,
2358 &thread->todo, &rdata);
2359 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002360 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002361
2362 if (fp->hdr.type == BINDER_TYPE_BINDER)
2363 fp->hdr.type = BINDER_TYPE_HANDLE;
2364 else
2365 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2366 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002367 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002368 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002369
Todd Kjosb0117bb2017-05-08 09:16:27 -07002370 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002371 binder_debug(BINDER_DEBUG_TRANSACTION,
2372 " node %d u%016llx -> ref %d desc %d\n",
2373 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002374 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002375done:
2376 binder_put_node(node);
2377 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002378}
2379
2380static int binder_translate_handle(struct flat_binder_object *fp,
2381 struct binder_transaction *t,
2382 struct binder_thread *thread)
2383{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002384 struct binder_proc *proc = thread->proc;
2385 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002386 struct binder_node *node;
2387 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002388 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002389
Todd Kjosb0117bb2017-05-08 09:16:27 -07002390 node = binder_get_node_from_ref(proc, fp->handle,
2391 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2392 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002393 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2394 proc->pid, thread->pid, fp->handle);
2395 return -EINVAL;
2396 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002397 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2398 ret = -EPERM;
2399 goto done;
2400 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002401
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002402 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002403 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002404 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2405 fp->hdr.type = BINDER_TYPE_BINDER;
2406 else
2407 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002408 fp->binder = node->ptr;
2409 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002410 if (node->proc)
2411 binder_inner_proc_lock(node->proc);
2412 binder_inc_node_nilocked(node,
2413 fp->hdr.type == BINDER_TYPE_BINDER,
2414 0, NULL);
2415 if (node->proc)
2416 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002417 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002418 binder_debug(BINDER_DEBUG_TRANSACTION,
2419 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002420 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2421 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002422 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002423 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002424 int ret;
2425 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002426
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002427 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002428 ret = binder_inc_ref_for_node(target_proc, node,
2429 fp->hdr.type == BINDER_TYPE_HANDLE,
2430 NULL, &dest_rdata);
2431 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002432 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002433
2434 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002435 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002436 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002437 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2438 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002439 binder_debug(BINDER_DEBUG_TRANSACTION,
2440 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002441 src_rdata.debug_id, src_rdata.desc,
2442 dest_rdata.debug_id, dest_rdata.desc,
2443 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002444 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002445done:
2446 binder_put_node(node);
2447 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002448}
2449
2450static int binder_translate_fd(int fd,
2451 struct binder_transaction *t,
2452 struct binder_thread *thread,
2453 struct binder_transaction *in_reply_to)
2454{
2455 struct binder_proc *proc = thread->proc;
2456 struct binder_proc *target_proc = t->to_proc;
2457 int target_fd;
2458 struct file *file;
2459 int ret;
2460 bool target_allows_fd;
2461
2462 if (in_reply_to)
2463 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2464 else
2465 target_allows_fd = t->buffer->target_node->accept_fds;
2466 if (!target_allows_fd) {
2467 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2468 proc->pid, thread->pid,
2469 in_reply_to ? "reply" : "transaction",
2470 fd);
2471 ret = -EPERM;
2472 goto err_fd_not_accepted;
2473 }
2474
2475 file = fget(fd);
2476 if (!file) {
2477 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2478 proc->pid, thread->pid, fd);
2479 ret = -EBADF;
2480 goto err_fget;
2481 }
2482 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2483 if (ret < 0) {
2484 ret = -EPERM;
2485 goto err_security;
2486 }
2487
2488 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2489 if (target_fd < 0) {
2490 ret = -ENOMEM;
2491 goto err_get_unused_fd;
2492 }
2493 task_fd_install(target_proc, target_fd, file);
2494 trace_binder_transaction_fd(t, fd, target_fd);
2495 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2496 fd, target_fd);
2497
2498 return target_fd;
2499
2500err_get_unused_fd:
2501err_security:
2502 fput(file);
2503err_fget:
2504err_fd_not_accepted:
2505 return ret;
2506}
2507
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002508static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2509 struct binder_buffer_object *parent,
2510 struct binder_transaction *t,
2511 struct binder_thread *thread,
2512 struct binder_transaction *in_reply_to)
2513{
2514 binder_size_t fdi, fd_buf_size, num_installed_fds;
2515 int target_fd;
2516 uintptr_t parent_buffer;
2517 u32 *fd_array;
2518 struct binder_proc *proc = thread->proc;
2519 struct binder_proc *target_proc = t->to_proc;
2520
2521 fd_buf_size = sizeof(u32) * fda->num_fds;
2522 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2523 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2524 proc->pid, thread->pid, (u64)fda->num_fds);
2525 return -EINVAL;
2526 }
2527 if (fd_buf_size > parent->length ||
2528 fda->parent_offset > parent->length - fd_buf_size) {
2529 /* No space for all file descriptors here. */
2530 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2531 proc->pid, thread->pid, (u64)fda->num_fds);
2532 return -EINVAL;
2533 }
2534 /*
2535 * Since the parent was already fixed up, convert it
2536 * back to the kernel address space to access it
2537 */
Todd Kjosd325d372016-10-10 10:40:53 -07002538 parent_buffer = parent->buffer -
2539 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002540 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2541 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2542 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2543 proc->pid, thread->pid);
2544 return -EINVAL;
2545 }
2546 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2547 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2548 in_reply_to);
2549 if (target_fd < 0)
2550 goto err_translate_fd_failed;
2551 fd_array[fdi] = target_fd;
2552 }
2553 return 0;
2554
2555err_translate_fd_failed:
2556 /*
2557 * Failed to allocate fd or security error, free fds
2558 * installed so far.
2559 */
2560 num_installed_fds = fdi;
2561 for (fdi = 0; fdi < num_installed_fds; fdi++)
2562 task_close_fd(target_proc, fd_array[fdi]);
2563 return target_fd;
2564}
2565
Martijn Coenen5a6da532016-09-30 14:10:07 +02002566static int binder_fixup_parent(struct binder_transaction *t,
2567 struct binder_thread *thread,
2568 struct binder_buffer_object *bp,
2569 binder_size_t *off_start,
2570 binder_size_t num_valid,
2571 struct binder_buffer_object *last_fixup_obj,
2572 binder_size_t last_fixup_min_off)
2573{
2574 struct binder_buffer_object *parent;
2575 u8 *parent_buffer;
2576 struct binder_buffer *b = t->buffer;
2577 struct binder_proc *proc = thread->proc;
2578 struct binder_proc *target_proc = t->to_proc;
2579
2580 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2581 return 0;
2582
2583 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2584 if (!parent) {
2585 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2586 proc->pid, thread->pid);
2587 return -EINVAL;
2588 }
2589
2590 if (!binder_validate_fixup(b, off_start,
2591 parent, bp->parent_offset,
2592 last_fixup_obj,
2593 last_fixup_min_off)) {
2594 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2595 proc->pid, thread->pid);
2596 return -EINVAL;
2597 }
2598
2599 if (parent->length < sizeof(binder_uintptr_t) ||
2600 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2601 /* No space for a pointer here! */
2602 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2603 proc->pid, thread->pid);
2604 return -EINVAL;
2605 }
2606 parent_buffer = (u8 *)(parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002607 binder_alloc_get_user_buffer_offset(
2608 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002609 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2610
2611 return 0;
2612}
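
/*
 * Worked example (illustrative sketch; 'foo' and 'payload' are
 * hypothetical): for a scatter-gather transaction carrying
 *
 *	struct foo { binder_uintptr_t data; };	// parent buffer object
 *	char payload[16];			// child, parent_offset == 0
 *
 * binder_fixup_parent() patches the copy of 'foo' in the target buffer
 * so that foo.data holds the target process's user-space address of the
 * 'payload' copy (bp->buffer) instead of the sender's address.
 */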
2613
Martijn Coenen053be422017-06-06 15:17:46 -07002614/**
2615 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2616 * @t: transaction to send
2617 * @proc: process to send the transaction to
2618 * @thread: thread in @proc to send the transaction to (may be NULL)
2619 *
2620 * This function queues a transaction to the specified process. It will try
2621 * to find a thread in the target process to handle the transaction and
2622 * wake it up. If no thread is found, the work is queued to the proc
2623 * waitqueue.
2624 *
2625 * If the @thread parameter is not NULL, the transaction is always queued
 2626 * to the todo list of that specific thread.
2627 *
 2628 * Return: true if the transaction was successfully queued
2629 * false if the target process or thread is dead
2630 */
2631static bool binder_proc_transaction(struct binder_transaction *t,
2632 struct binder_proc *proc,
2633 struct binder_thread *thread)
2634{
2635 struct list_head *target_list = NULL;
2636 struct binder_node *node = t->buffer->target_node;
2637 bool oneway = !!(t->flags & TF_ONE_WAY);
2638 bool wakeup = true;
2639
2640 BUG_ON(!node);
2641 binder_node_lock(node);
2642 if (oneway) {
2643 BUG_ON(thread);
2644 if (node->has_async_transaction) {
2645 target_list = &node->async_todo;
2646 wakeup = false;
2647 } else {
2648 node->has_async_transaction = 1;
2649 }
2650 }
2651
2652 binder_inner_proc_lock(proc);
2653
2654 if (proc->is_dead || (thread && thread->is_dead)) {
2655 binder_inner_proc_unlock(proc);
2656 binder_node_unlock(node);
2657 return false;
2658 }
2659
2660 if (!thread && !target_list)
2661 thread = binder_select_thread_ilocked(proc);
2662
2663 if (thread)
2664 target_list = &thread->todo;
2665 else if (!target_list)
2666 target_list = &proc->todo;
2667 else
2668 BUG_ON(target_list != &node->async_todo);
2669
2670 binder_enqueue_work_ilocked(&t->work, target_list);
2671
2672 if (wakeup)
2673 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2674
2675 binder_inner_proc_unlock(proc);
2676 binder_node_unlock(node);
2677
2678 return true;
2679}
2680
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002681static void binder_transaction(struct binder_proc *proc,
2682 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002683 struct binder_transaction_data *tr, int reply,
2684 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002685{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002686 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002687 struct binder_transaction *t;
2688 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002689 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002690 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002691 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002692 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002693 struct binder_thread *target_thread = NULL;
2694 struct binder_node *target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002695 struct binder_transaction *in_reply_to = NULL;
2696 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002697 uint32_t return_error = 0;
2698 uint32_t return_error_param = 0;
2699 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002700 struct binder_buffer_object *last_fixup_obj = NULL;
2701 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002702 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002703 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002704
2705 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002706 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002707 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2708 e->from_proc = proc->pid;
2709 e->from_thread = thread->pid;
2710 e->target_handle = tr->target.handle;
2711 e->data_size = tr->data_size;
2712 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002713 e->context_name = proc->context->name;
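	/*
	 * The log entry stays marked in-progress until debug_id_done is
	 * stored at the end of this function (after an smp_wmb()), so
	 * readers of the transaction log can detect a torn entry.
	 */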
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002714
2715 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002716 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002717 in_reply_to = thread->transaction_stack;
2718 if (in_reply_to == NULL) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07002719 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302720 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002721 proc->pid, thread->pid);
2722 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002723 return_error_param = -EPROTO;
2724 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002725 goto err_empty_call_stack;
2726 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002727 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002728 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302729 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002730 proc->pid, thread->pid, in_reply_to->debug_id,
2731 in_reply_to->to_proc ?
2732 in_reply_to->to_proc->pid : 0,
2733 in_reply_to->to_thread ?
2734 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002735 spin_unlock(&in_reply_to->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002736 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002737 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002738 return_error_param = -EPROTO;
2739 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002740 in_reply_to = NULL;
2741 goto err_bad_call_stack;
2742 }
2743 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002744 binder_inner_proc_unlock(proc);
Martijn Coenen57b2ac62017-06-06 17:04:42 -07002745 binder_set_priority(current, in_reply_to->saved_priority);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002746 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002747 if (target_thread == NULL) {
2748 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002749 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002750 goto err_dead_binder;
2751 }
2752 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302753 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002754 proc->pid, thread->pid,
2755 target_thread->transaction_stack ?
2756 target_thread->transaction_stack->debug_id : 0,
2757 in_reply_to->debug_id);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002758 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002759 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002760 return_error_param = -EPROTO;
2761 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002762 in_reply_to = NULL;
2763 target_thread = NULL;
2764 goto err_dead_binder;
2765 }
2766 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002767 target_proc->tmp_ref++;
Martijn Coenen995a36e2017-06-02 13:36:52 -07002768 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002769 } else {
2770 if (tr->target.handle) {
2771 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002772
Todd Kjosc37162d2017-05-26 11:56:29 -07002773 /*
 2774			 * A strong ref on this node is
 2775			 * guaranteed to exist here; take
 2776			 * another strong increment so the
 2777			 * node stays alive until the
 2778			 * transaction is done.
2779 */
Todd Kjos5346bf32016-10-20 16:43:34 -07002780 binder_proc_lock(proc);
2781 ref = binder_get_ref_olocked(proc, tr->target.handle,
2782 true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002783 if (ref) {
2784 binder_inc_node(ref->node, 1, 0, NULL);
2785 target_node = ref->node;
2786 }
Todd Kjos5346bf32016-10-20 16:43:34 -07002787 binder_proc_unlock(proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002788 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302789 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002790 proc->pid, thread->pid);
2791 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002792 return_error_param = -EINVAL;
2793 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002794 goto err_invalid_target_handle;
2795 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002796 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002797 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002798 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002799 if (target_node == NULL) {
2800 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002801 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjose598d172017-03-22 17:19:52 -07002802 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002803 goto err_no_context_mgr_node;
2804 }
Todd Kjosc37162d2017-05-26 11:56:29 -07002805 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002806 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002807 }
2808 e->to_node = target_node->debug_id;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002809 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002810 target_proc = target_node->proc;
2811 if (target_proc == NULL) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002812 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002813 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002814 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002815 goto err_dead_binder;
2816 }
Todd Kjosb4827902017-05-25 15:52:17 -07002817 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002818 target_proc->tmp_ref++;
Todd Kjosb4827902017-05-25 15:52:17 -07002819 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002820 binder_node_unlock(target_node);
Stephen Smalley79af7302015-01-21 10:54:10 -05002821 if (security_binder_transaction(proc->tsk,
2822 target_proc->tsk) < 0) {
2823 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002824 return_error_param = -EPERM;
2825 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05002826 goto err_invalid_target_handle;
2827 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002828 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002829 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2830 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002831
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002832 tmp = thread->transaction_stack;
2833 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002834 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302835 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002836 proc->pid, thread->pid, tmp->debug_id,
2837 tmp->to_proc ? tmp->to_proc->pid : 0,
2838 tmp->to_thread ?
2839 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002840 spin_unlock(&tmp->lock);
Martijn Coenen995a36e2017-06-02 13:36:52 -07002841 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002842 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002843 return_error_param = -EPROTO;
2844 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002845 goto err_bad_call_stack;
2846 }
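		/*
		 * Walk the call stack: if some transaction in this chain
		 * originated from the target process, route the new
		 * transaction to that (currently blocked) thread, keeping
		 * nested calls on one stack instead of tying up another
		 * target thread.
		 */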
2847 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002848 struct binder_thread *from;
2849
2850 spin_lock(&tmp->lock);
2851 from = tmp->from;
2852 if (from && from->proc == target_proc) {
2853 atomic_inc(&from->tmp_ref);
2854 target_thread = from;
2855 spin_unlock(&tmp->lock);
2856 break;
2857 }
2858 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002859 tmp = tmp->from_parent;
2860 }
2861 }
Martijn Coenen995a36e2017-06-02 13:36:52 -07002862 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002863 }
Martijn Coenen053be422017-06-06 15:17:46 -07002864 if (target_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002865 e->to_thread = target_thread->pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002866 e->to_proc = target_proc->pid;
2867
2868 /* TODO: reuse incoming transaction for reply */
2869 t = kzalloc(sizeof(*t), GFP_KERNEL);
2870 if (t == NULL) {
2871 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002872 return_error_param = -ENOMEM;
2873 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002874 goto err_alloc_t_failed;
2875 }
2876 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07002877 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002878
2879 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2880 if (tcomplete == NULL) {
2881 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002882 return_error_param = -ENOMEM;
2883 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002884 goto err_alloc_tcomplete_failed;
2885 }
2886 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2887
Todd Kjos1cfe6272017-05-24 13:33:28 -07002888 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002889
2890 if (reply)
2891 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002892 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002893 proc->pid, thread->pid, t->debug_id,
2894 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002895 (u64)tr->data.ptr.buffer,
2896 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002897 (u64)tr->data_size, (u64)tr->offsets_size,
2898 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002899 else
2900 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002901 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002902 proc->pid, thread->pid, t->debug_id,
2903 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002904 (u64)tr->data.ptr.buffer,
2905 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002906 (u64)tr->data_size, (u64)tr->offsets_size,
2907 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002908
2909 if (!reply && !(tr->flags & TF_ONE_WAY))
2910 t->from = thread;
2911 else
2912 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03002913 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002914 t->to_proc = target_proc;
2915 t->to_thread = target_thread;
2916 t->code = tr->code;
2917 t->flags = tr->flags;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07002918 if (!(t->flags & TF_ONE_WAY) &&
2919 binder_supported_policy(current->policy)) {
2920 /* Inherit supported policies for synchronous transactions */
2921 t->priority.sched_policy = current->policy;
2922 t->priority.prio = current->normal_prio;
2923 } else {
2924 /* Otherwise, fall back to the default priority */
2925 t->priority = target_proc->default_priority;
2926 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002927
2928 trace_binder_transaction(reply, t, target_node);
2929
Todd Kjosd325d372016-10-10 10:40:53 -07002930 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02002931 tr->offsets_size, extra_buffers_size,
2932 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07002933 if (IS_ERR(t->buffer)) {
2934 /*
2935 * -ESRCH indicates VMA cleared. The target is dying.
2936 */
2937 return_error_param = PTR_ERR(t->buffer);
2938 return_error = return_error_param == -ESRCH ?
2939 BR_DEAD_REPLY : BR_FAILED_REPLY;
2940 return_error_line = __LINE__;
2941 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002942 goto err_binder_alloc_buf_failed;
2943 }
2944 t->buffer->allow_user_free = 0;
2945 t->buffer->debug_id = t->debug_id;
2946 t->buffer->transaction = t;
2947 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002948 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002949 off_start = (binder_size_t *)(t->buffer->data +
2950 ALIGN(tr->data_size, sizeof(void *)));
2951 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002952
Arve Hjønnevågda498892014-02-21 14:40:26 -08002953 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2954 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302955 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2956 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002957 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002958 return_error_param = -EFAULT;
2959 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002960 goto err_copy_data_failed;
2961 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002962 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2963 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302964 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2965 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002966 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002967 return_error_param = -EFAULT;
2968 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002969 goto err_copy_data_failed;
2970 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002971 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2972 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2973 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002974 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002975 return_error_param = -EINVAL;
2976 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002977 goto err_bad_offset;
2978 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02002979 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2980 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2981 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05302982 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002983 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002984 return_error_param = -EINVAL;
2985 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002986 goto err_bad_offset;
2987 }
2988 off_end = (void *)off_start + tr->offsets_size;
2989 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2990 sg_buf_end = sg_bufp + extra_buffers_size;
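	/*
	 * Buffer layout: [data][offsets array, void*-aligned][extra
	 * scatter-gather space]. off_end bounds the offsets array;
	 * sg_bufp/sg_buf_end bound the space consumed by
	 * BINDER_TYPE_PTR payloads copied below.
	 */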
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002991 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002992 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002993 struct binder_object_header *hdr;
2994 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002995
Martijn Coenen00c80372016-07-13 12:06:49 +02002996 if (object_size == 0 || *offp < off_min) {
2997 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002998 proc->pid, thread->pid, (u64)*offp,
2999 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02003000 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003001 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003002 return_error_param = -EINVAL;
3003 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003004 goto err_bad_offset;
3005 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003006
3007 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3008 off_min = *offp + object_size;
3009 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003010 case BINDER_TYPE_BINDER:
3011 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003012 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003013
Martijn Coenen00c80372016-07-13 12:06:49 +02003014 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003015 ret = binder_translate_binder(fp, t, thread);
3016 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02003017 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003018 return_error_param = ret;
3019 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003020 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003021 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003022 } break;
3023 case BINDER_TYPE_HANDLE:
3024 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003025 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003026
Martijn Coenen00c80372016-07-13 12:06:49 +02003027 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003028 ret = binder_translate_handle(fp, t, thread);
3029 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003030 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003031 return_error_param = ret;
3032 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003033 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003034 }
3035 } break;
3036
3037 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02003038 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003039 int target_fd = binder_translate_fd(fp->fd, t, thread,
3040 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003041
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003042 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003043 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003044 return_error_param = target_fd;
3045 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003046 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003047 }
Martijn Coenen00c80372016-07-13 12:06:49 +02003048 fp->pad_binder = 0;
3049 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003050 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003051 case BINDER_TYPE_FDA: {
3052 struct binder_fd_array_object *fda =
3053 to_binder_fd_array_object(hdr);
3054 struct binder_buffer_object *parent =
3055 binder_validate_ptr(t->buffer, fda->parent,
3056 off_start,
3057 offp - off_start);
3058 if (!parent) {
3059 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3060 proc->pid, thread->pid);
3061 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003062 return_error_param = -EINVAL;
3063 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003064 goto err_bad_parent;
3065 }
3066 if (!binder_validate_fixup(t->buffer, off_start,
3067 parent, fda->parent_offset,
3068 last_fixup_obj,
3069 last_fixup_min_off)) {
3070 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3071 proc->pid, thread->pid);
3072 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003073 return_error_param = -EINVAL;
3074 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003075 goto err_bad_parent;
3076 }
3077 ret = binder_translate_fd_array(fda, parent, t, thread,
3078 in_reply_to);
3079 if (ret < 0) {
3080 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003081 return_error_param = ret;
3082 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003083 goto err_translate_failed;
3084 }
3085 last_fixup_obj = parent;
3086 last_fixup_min_off =
3087 fda->parent_offset + sizeof(u32) * fda->num_fds;
3088 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003089 case BINDER_TYPE_PTR: {
3090 struct binder_buffer_object *bp =
3091 to_binder_buffer_object(hdr);
3092 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003093
Martijn Coenen5a6da532016-09-30 14:10:07 +02003094 if (bp->length > buf_left) {
3095 binder_user_error("%d:%d got transaction with too large buffer\n",
3096 proc->pid, thread->pid);
3097 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003098 return_error_param = -EINVAL;
3099 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003100 goto err_bad_offset;
3101 }
3102 if (copy_from_user(sg_bufp,
3103 (const void __user *)(uintptr_t)
3104 bp->buffer, bp->length)) {
3105 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3106 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07003107 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003108 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003109 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003110 goto err_copy_data_failed;
3111 }
3112 /* Fixup buffer pointer to target proc address space */
3113 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07003114 binder_alloc_get_user_buffer_offset(
3115 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02003116 sg_bufp += ALIGN(bp->length, sizeof(u64));
3117
3118 ret = binder_fixup_parent(t, thread, bp, off_start,
3119 offp - off_start,
3120 last_fixup_obj,
3121 last_fixup_min_off);
3122 if (ret < 0) {
3123 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003124 return_error_param = ret;
3125 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02003126 goto err_translate_failed;
3127 }
3128 last_fixup_obj = bp;
3129 last_fixup_min_off = 0;
3130 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003131 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01003132 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02003133 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003134 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07003135 return_error_param = -EINVAL;
3136 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003137 goto err_bad_object_type;
3138 }
3139 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003140 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003141 binder_enqueue_work(proc, tcomplete, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003142 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07003143
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003144 if (reply) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003145 binder_inner_proc_lock(target_proc);
3146 if (target_thread->is_dead) {
3147 binder_inner_proc_unlock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003148 goto err_dead_proc_or_thread;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003149 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003150 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003151 binder_pop_transaction_ilocked(target_thread, in_reply_to);
Martijn Coenen053be422017-06-06 15:17:46 -07003152 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003153 binder_inner_proc_unlock(target_proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003154 wake_up_interruptible_sync(&target_thread->wait);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003155 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003156 } else if (!(t->flags & TF_ONE_WAY)) {
3157 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003158 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003159 t->need_reply = 1;
3160 t->from_parent = thread->transaction_stack;
3161 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07003162 binder_inner_proc_unlock(proc);
Martijn Coenen053be422017-06-06 15:17:46 -07003163 if (!binder_proc_transaction(t, target_proc, target_thread)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07003164 binder_inner_proc_lock(proc);
3165 binder_pop_transaction_ilocked(thread, t);
3166 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003167 goto err_dead_proc_or_thread;
3168 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003169 } else {
3170 BUG_ON(target_node == NULL);
3171 BUG_ON(t->buffer->async_transaction != 1);
Martijn Coenen053be422017-06-06 15:17:46 -07003172 if (!binder_proc_transaction(t, target_proc, NULL))
Todd Kjos2f993e22017-05-12 14:42:55 -07003173 goto err_dead_proc_or_thread;
Riley Andrewsb5968812015-09-01 12:42:07 -07003174 }
Todd Kjos2f993e22017-05-12 14:42:55 -07003175 if (target_thread)
3176 binder_thread_dec_tmpref(target_thread);
3177 binder_proc_dec_tmpref(target_proc);
Todd Kjos1cfe6272017-05-24 13:33:28 -07003178 /*
3179 * write barrier to synchronize with initialization
3180 * of log entry
3181 */
3182 smp_wmb();
3183 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003184 return;
3185
Todd Kjos2f993e22017-05-12 14:42:55 -07003186err_dead_proc_or_thread:
3187 return_error = BR_DEAD_REPLY;
3188 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02003189err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003190err_bad_object_type:
3191err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02003192err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003193err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003194 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003195 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjosc37162d2017-05-26 11:56:29 -07003196 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003197 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07003198 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003199err_binder_alloc_buf_failed:
3200 kfree(tcomplete);
3201 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3202err_alloc_tcomplete_failed:
3203 kfree(t);
3204 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3205err_alloc_t_failed:
3206err_bad_call_stack:
3207err_empty_call_stack:
3208err_dead_binder:
3209err_invalid_target_handle:
3210err_no_context_mgr_node:
Todd Kjos2f993e22017-05-12 14:42:55 -07003211 if (target_thread)
3212 binder_thread_dec_tmpref(target_thread);
3213 if (target_proc)
3214 binder_proc_dec_tmpref(target_proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07003215 if (target_node)
3216 binder_dec_node(target_node, 1, 0);
3217
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003218 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07003219 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3220 proc->pid, thread->pid, return_error, return_error_param,
3221 (u64)tr->data_size, (u64)tr->offsets_size,
3222 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003223
3224 {
3225 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09003226
Todd Kjose598d172017-03-22 17:19:52 -07003227 e->return_error = return_error;
3228 e->return_error_param = return_error_param;
3229 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003230 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3231 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07003232 /*
3233 * write barrier to synchronize with initialization
3234 * of log entry
3235 */
3236 smp_wmb();
3237 WRITE_ONCE(e->debug_id_done, t_debug_id);
3238 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003239 }
3240
Todd Kjos858b8da2017-04-21 17:35:12 -07003241 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003242 if (in_reply_to) {
Todd Kjos858b8da2017-04-21 17:35:12 -07003243 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003244 binder_enqueue_work(thread->proc,
3245 &thread->return_error.work,
3246 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003247 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07003248 } else {
3249 thread->return_error.cmd = return_error;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003250 binder_enqueue_work(thread->proc,
3251 &thread->return_error.work,
3252 &thread->todo);
Todd Kjos858b8da2017-04-21 17:35:12 -07003253 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003254}
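/*
 * Editorial sketch (not driver code): the path above is driven from
 * userspace via the BINDER_WRITE_READ ioctl. Command codes and structs
 * are from uapi/linux/android/binder.h; the handle and code values are
 * placeholders and error handling is elided.
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle = handle,	// remote node's handle
 *		.code = 1,			// app-defined method code
 *		.data.ptr.buffer = (binder_uintptr_t)payload,
 *		.data_size = payload_len,
 *		.offsets_size = 0,		// no binder objects embedded
 *	};
 *	uint32_t cmd = BC_TRANSACTION;
 *	unsigned char wbuf[sizeof(cmd) + sizeof(tr)];
 *	memcpy(wbuf, &cmd, sizeof(cmd));
 *	memcpy(wbuf + sizeof(cmd), &tr, sizeof(tr));
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wbuf),
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */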
3255
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003256static int binder_thread_write(struct binder_proc *proc,
3257 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003258 binder_uintptr_t binder_buffer, size_t size,
3259 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003260{
3261 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02003262 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003263 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003264 void __user *ptr = buffer + *consumed;
3265 void __user *end = buffer + size;
3266
Todd Kjos858b8da2017-04-21 17:35:12 -07003267 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07003268 int ret;
3269
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003270 if (get_user(cmd, (uint32_t __user *)ptr))
3271 return -EFAULT;
3272 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003273 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003274 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003275 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3276 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3277 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003278 }
3279 switch (cmd) {
3280 case BC_INCREFS:
3281 case BC_ACQUIRE:
3282 case BC_RELEASE:
3283 case BC_DECREFS: {
3284 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003285 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003286 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3287 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3288 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003289
3290 if (get_user(target, (uint32_t __user *)ptr))
3291 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003292
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003293 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07003294 ret = -1;
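			/*
			 * An increment on handle 0 targets the context
			 * manager: take the ref directly on its node so
			 * the ref can be created even before this proc
			 * holds one.
			 */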
3295 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003296 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003297 mutex_lock(&context->context_mgr_node_lock);
3298 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07003299 if (ctx_mgr_node)
3300 ret = binder_inc_ref_for_node(
3301 proc, ctx_mgr_node,
3302 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07003303 mutex_unlock(&context->context_mgr_node_lock);
3304 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07003305 if (ret)
3306 ret = binder_update_ref_for_handle(
3307 proc, target, increment, strong,
3308 &rdata);
3309 if (!ret && rdata.desc != target) {
3310 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3311 proc->pid, thread->pid,
3312 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003313 }
3314 switch (cmd) {
3315 case BC_INCREFS:
3316 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003317 break;
3318 case BC_ACQUIRE:
3319 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003320 break;
3321 case BC_RELEASE:
3322 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003323 break;
3324 case BC_DECREFS:
3325 default:
3326 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003327 break;
3328 }
3329 if (ret) {
3330 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3331 proc->pid, thread->pid, debug_string,
3332 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003333 break;
3334 }
3335 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003336 "%d:%d %s ref %d desc %d s %d w %d\n",
3337 proc->pid, thread->pid, debug_string,
3338 rdata.debug_id, rdata.desc, rdata.strong,
3339 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003340 break;
3341 }
3342 case BC_INCREFS_DONE:
3343 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003344 binder_uintptr_t node_ptr;
3345 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003346 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003347 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003348
Arve Hjønnevågda498892014-02-21 14:40:26 -08003349 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003350 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003351 ptr += sizeof(binder_uintptr_t);
3352 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003353 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003354 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003355 node = binder_get_node(proc, node_ptr);
3356 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003357 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003358 proc->pid, thread->pid,
3359 cmd == BC_INCREFS_DONE ?
3360 "BC_INCREFS_DONE" :
3361 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003362 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003363 break;
3364 }
3365 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003366 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003367 proc->pid, thread->pid,
3368 cmd == BC_INCREFS_DONE ?
3369 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003370 (u64)node_ptr, node->debug_id,
3371 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003372 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003373 break;
3374 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003375 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003376 if (cmd == BC_ACQUIRE_DONE) {
3377 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303378 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003379 proc->pid, thread->pid,
3380 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003381 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003382 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003383 break;
3384 }
3385 node->pending_strong_ref = 0;
3386 } else {
3387 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303388 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003389 proc->pid, thread->pid,
3390 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003391 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003392 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003393 break;
3394 }
3395 node->pending_weak_ref = 0;
3396 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003397 free_node = binder_dec_node_nilocked(node,
3398 cmd == BC_ACQUIRE_DONE, 0);
3399 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003400 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003401 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003402 proc->pid, thread->pid,
3403 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003404 node->debug_id, node->local_strong_refs,
3405 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003406 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003407 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003408 break;
3409 }
3410 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303411 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003412 return -EINVAL;
3413 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303414 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003415 return -EINVAL;
3416
3417 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003418 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003419 struct binder_buffer *buffer;
3420
Arve Hjønnevågda498892014-02-21 14:40:26 -08003421 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003422 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003423 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003424
Todd Kjos076072a2017-04-21 14:32:11 -07003425 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3426 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003427 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003428 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3429 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003430 break;
3431 }
3432 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003433 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3434 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003435 break;
3436 }
3437 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003438 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3439 proc->pid, thread->pid, (u64)data_ptr,
3440 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003441 buffer->transaction ? "active" : "finished");
3442
3443 if (buffer->transaction) {
3444 buffer->transaction->buffer = NULL;
3445 buffer->transaction = NULL;
3446 }
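		/*
		 * Freeing a buffer that carried an async transaction
		 * unblocks the node's async queue: the next pending work
		 * (if any) is moved to this thread's todo list, keeping
		 * async transactions serialized per node.
		 */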
3447 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003448 struct binder_node *buf_node;
3449 struct binder_work *w;
3450
3451 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003452 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003453 BUG_ON(!buf_node->has_async_transaction);
3454 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003455 w = binder_dequeue_work_head_ilocked(
3456 &buf_node->async_todo);
3457 if (!w)
3458 buf_node->has_async_transaction = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003459 else
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003460 binder_enqueue_work_ilocked(
3461 w, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003462 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003463 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003464 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003465 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003466 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003467 break;
3468 }
3469
Martijn Coenen5a6da532016-09-30 14:10:07 +02003470 case BC_TRANSACTION_SG:
3471 case BC_REPLY_SG: {
3472 struct binder_transaction_data_sg tr;
3473
3474 if (copy_from_user(&tr, ptr, sizeof(tr)))
3475 return -EFAULT;
3476 ptr += sizeof(tr);
3477 binder_transaction(proc, thread, &tr.transaction_data,
3478 cmd == BC_REPLY_SG, tr.buffers_size);
3479 break;
3480 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003481 case BC_TRANSACTION:
3482 case BC_REPLY: {
3483 struct binder_transaction_data tr;
3484
3485 if (copy_from_user(&tr, ptr, sizeof(tr)))
3486 return -EFAULT;
3487 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003488 binder_transaction(proc, thread, &tr,
3489 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003490 break;
3491 }
3492
3493 case BC_REGISTER_LOOPER:
3494 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303495 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003496 proc->pid, thread->pid);
Todd Kjosd600e902017-05-25 17:35:02 -07003497 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003498 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3499 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303500 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003501 proc->pid, thread->pid);
3502 } else if (proc->requested_threads == 0) {
3503 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303504 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003505 proc->pid, thread->pid);
3506 } else {
3507 proc->requested_threads--;
3508 proc->requested_threads_started++;
3509 }
3510 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosd600e902017-05-25 17:35:02 -07003511 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003512 break;
3513 case BC_ENTER_LOOPER:
3514 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303515 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003516 proc->pid, thread->pid);
3517 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3518 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303519 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003520 proc->pid, thread->pid);
3521 }
3522 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3523 break;
3524 case BC_EXIT_LOOPER:
3525 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303526 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003527 proc->pid, thread->pid);
3528 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3529 break;
3530
3531 case BC_REQUEST_DEATH_NOTIFICATION:
3532 case BC_CLEAR_DEATH_NOTIFICATION: {
3533 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003534 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003535 struct binder_ref *ref;
Todd Kjos5346bf32016-10-20 16:43:34 -07003536 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003537
3538 if (get_user(target, (uint32_t __user *)ptr))
3539 return -EFAULT;
3540 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003541 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003542 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003543 ptr += sizeof(binder_uintptr_t);
Todd Kjos5346bf32016-10-20 16:43:34 -07003544 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3545 /*
3546 * Allocate memory for death notification
3547 * before taking lock
3548 */
3549 death = kzalloc(sizeof(*death), GFP_KERNEL);
3550 if (death == NULL) {
3551 WARN_ON(thread->return_error.cmd !=
3552 BR_OK);
3553 thread->return_error.cmd = BR_ERROR;
3554 binder_enqueue_work(
3555 thread->proc,
3556 &thread->return_error.work,
3557 &thread->todo);
3558 binder_debug(
3559 BINDER_DEBUG_FAILED_TRANSACTION,
3560 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3561 proc->pid, thread->pid);
3562 break;
3563 }
3564 }
3565 binder_proc_lock(proc);
3566 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003567 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303568 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003569 proc->pid, thread->pid,
3570 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3571 "BC_REQUEST_DEATH_NOTIFICATION" :
3572 "BC_CLEAR_DEATH_NOTIFICATION",
3573 target);
Todd Kjos5346bf32016-10-20 16:43:34 -07003574 binder_proc_unlock(proc);
3575 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003576 break;
3577 }
3578
3579 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003580 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003581 proc->pid, thread->pid,
3582 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3583 "BC_REQUEST_DEATH_NOTIFICATION" :
3584 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003585 (u64)cookie, ref->data.debug_id,
3586 ref->data.desc, ref->data.strong,
3587 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003588
Martijn Coenenf9eac642017-05-22 11:26:23 -07003589 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003590 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3591 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303592 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003593 proc->pid, thread->pid);
Martijn Coenenf9eac642017-05-22 11:26:23 -07003594 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003595 binder_proc_unlock(proc);
3596 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003597 break;
3598 }
3599 binder_stats_created(BINDER_STAT_DEATH);
3600 INIT_LIST_HEAD(&death->work.entry);
3601 death->cookie = cookie;
3602 ref->death = death;
3603 if (ref->node->proc == NULL) {
3604 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003605 if (thread->looper &
3606 (BINDER_LOOPER_STATE_REGISTERED |
3607 BINDER_LOOPER_STATE_ENTERED))
3608 binder_enqueue_work(
3609 proc,
3610 &ref->death->work,
3611 &thread->todo);
3612 else {
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003613 binder_inner_proc_lock(proc);
3614 binder_enqueue_work_ilocked(
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003615 &ref->death->work,
3616 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003617 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003618 proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003619 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003620 }
3621 }
3622 } else {
3623 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303624 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003625 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003626 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003627 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003628 break;
3629 }
3630 death = ref->death;
3631 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003632 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003633 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003634 (u64)death->cookie,
3635 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003636 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003637 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003638 break;
3639 }
3640 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003641 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003642 if (list_empty(&death->work.entry)) {
3643 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003644 if (thread->looper &
3645 (BINDER_LOOPER_STATE_REGISTERED |
3646 BINDER_LOOPER_STATE_ENTERED))
3647 binder_enqueue_work_ilocked(
3648 &death->work,
3649 &thread->todo);
3650 else {
3651 binder_enqueue_work_ilocked(
3652 &death->work,
3653 &proc->todo);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003654 binder_wakeup_proc_ilocked(
Martijn Coenen053be422017-06-06 15:17:46 -07003655 proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003656 }
3657 } else {
3658 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3659 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3660 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003661 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003662 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07003663 binder_node_unlock(ref->node);
Todd Kjos5346bf32016-10-20 16:43:34 -07003664 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003665 } break;
3666 case BC_DEAD_BINDER_DONE: {
3667 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003668 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003669 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003670
Arve Hjønnevågda498892014-02-21 14:40:26 -08003671 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003672 return -EFAULT;
3673
Lisa Du7a64cd82016-02-17 09:32:52 +08003674 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003675 binder_inner_proc_lock(proc);
3676 list_for_each_entry(w, &proc->delivered_death,
3677 entry) {
3678 struct binder_ref_death *tmp_death =
3679 container_of(w,
3680 struct binder_ref_death,
3681 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003682
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003683 if (tmp_death->cookie == cookie) {
3684 death = tmp_death;
3685 break;
3686 }
3687 }
3688 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003689 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3690 proc->pid, thread->pid, (u64)cookie,
3691 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003692 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003693 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3694 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003695 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003696 break;
3697 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003698 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003699 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3700 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003701 if (thread->looper &
3702 (BINDER_LOOPER_STATE_REGISTERED |
3703 BINDER_LOOPER_STATE_ENTERED))
3704 binder_enqueue_work_ilocked(
3705 &death->work, &thread->todo);
3706 else {
3707 binder_enqueue_work_ilocked(
3708 &death->work,
3709 &proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07003710 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003711 }
3712 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003713 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003714 } break;
3715
3716 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303717 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003718 proc->pid, thread->pid, cmd);
3719 return -EINVAL;
3720 }
3721 *consumed = ptr - buffer;
3722 }
3723 return 0;
3724}
3725
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003726static void binder_stat_br(struct binder_proc *proc,
3727 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003728{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003729 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003730 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003731 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3732 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3733 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003734 }
3735}
3736
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003737static int binder_has_thread_work(struct binder_thread *thread)
3738{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003739 return !binder_worklist_empty(thread->proc, &thread->todo) ||
3740 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003741}
3742
Todd Kjos60792612017-05-24 10:51:01 -07003743static int binder_put_node_cmd(struct binder_proc *proc,
3744 struct binder_thread *thread,
3745 void __user **ptrp,
3746 binder_uintptr_t node_ptr,
3747 binder_uintptr_t node_cookie,
3748 int node_debug_id,
3749 uint32_t cmd, const char *cmd_name)
3750{
3751 void __user *ptr = *ptrp;
3752
3753 if (put_user(cmd, (uint32_t __user *)ptr))
3754 return -EFAULT;
3755 ptr += sizeof(uint32_t);
3756
3757 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3758 return -EFAULT;
3759 ptr += sizeof(binder_uintptr_t);
3760
3761 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3762 return -EFAULT;
3763 ptr += sizeof(binder_uintptr_t);
3764
3765 binder_stat_br(proc, thread, cmd);
3766 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3767 proc->pid, thread->pid, cmd_name, node_debug_id,
3768 (u64)node_ptr, (u64)node_cookie);
3769
3770 *ptrp = ptr;
3771 return 0;
3772}
3773
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003774static int binder_wait_for_work(struct binder_thread *thread,
3775 bool do_proc_work)
3776{
3777 DEFINE_WAIT(wait);
3778 struct binder_proc *proc = thread->proc;
3779 int ret = 0;
3780
3781 freezer_do_not_count();
3782 binder_inner_proc_lock(proc);
3783 for (;;) {
3784 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3785 if (binder_has_work_ilocked(thread, do_proc_work))
3786 break;
3787 if (do_proc_work)
3788 list_add(&thread->waiting_thread_node,
3789 &proc->waiting_threads);
3790 binder_inner_proc_unlock(proc);
3791 schedule();
3792 binder_inner_proc_lock(proc);
3793 list_del_init(&thread->waiting_thread_node);
3794 if (signal_pending(current)) {
3795 ret = -ERESTARTSYS;
3796 break;
3797 }
3798 }
3799 finish_wait(&thread->wait, &wait);
3800 binder_inner_proc_unlock(proc);
3801 freezer_count();
3802
3803 return ret;
3804}
3805
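/*
 * binder_thread_read() - drain pending work into the user read buffer
 * Writes a leading BR_NOOP when *consumed is 0, then translates queued
 * binder_work items into BR_* commands and transaction data until the
 * buffer is full or the work lists are empty.  May end by asking user
 * space to spawn another looper thread via BR_SPAWN_LOOPER.
 */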
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003806static int binder_thread_read(struct binder_proc *proc,
3807 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003808 binder_uintptr_t binder_buffer, size_t size,
3809 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003810{
Arve Hjønnevågda498892014-02-21 14:40:26 -08003811 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003812 void __user *ptr = buffer + *consumed;
3813 void __user *end = buffer + size;
3814
3815 int ret = 0;
3816 int wait_for_proc_work;
3817
3818 if (*consumed == 0) {
3819 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3820 return -EFAULT;
3821 ptr += sizeof(uint32_t);
3822 }
3823
3824retry:
Martijn Coenen995a36e2017-06-02 13:36:52 -07003825 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003826 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
Martijn Coenen995a36e2017-06-02 13:36:52 -07003827 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003828
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003829 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003830
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003831 trace_binder_wait_for_work(wait_for_proc_work,
3832 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003833 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003834 if (wait_for_proc_work) {
3835 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3836 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303837 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003838 proc->pid, thread->pid, thread->looper);
3839 wait_event_interruptible(binder_user_error_wait,
3840 binder_stop_on_user_error < 2);
3841 }
Martijn Coenen57b2ac62017-06-06 17:04:42 -07003842 binder_set_priority(current, proc->default_priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003843 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003844
Martijn Coenen22d64e4322017-06-02 11:15:44 -07003845 if (non_block) {
3846 if (!binder_has_work(thread, wait_for_proc_work))
3847 ret = -EAGAIN;
3848 } else {
3849 ret = binder_wait_for_work(thread, wait_for_proc_work);
3850 }
3851
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003852 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3853
3854 if (ret)
3855 return ret;
3856
3857 while (1) {
3858 uint32_t cmd;
3859 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003860 struct binder_work *w = NULL;
3861 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003862 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07003863 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003864
Todd Kjose7f23ed2017-03-21 13:06:01 -07003865 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003866 if (!binder_worklist_empty_ilocked(&thread->todo))
3867 list = &thread->todo;
3868 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3869 wait_for_proc_work)
3870 list = &proc->todo;
3871 else {
3872 binder_inner_proc_unlock(proc);
3873
Dmitry Voytik395262a2014-09-08 18:16:34 +04003874 /* no data added */
Todd Kjos6798e6d2017-01-06 14:19:25 -08003875 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003876 goto retry;
3877 break;
3878 }
3879
Todd Kjose7f23ed2017-03-21 13:06:01 -07003880 if (end - ptr < sizeof(tr) + 4) {
3881 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003882 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003883 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003884 w = binder_dequeue_work_head_ilocked(list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003885
3886 switch (w->type) {
3887 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003888 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003889 t = container_of(w, struct binder_transaction, work);
3890 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003891 case BINDER_WORK_RETURN_ERROR: {
3892 struct binder_error *e = container_of(
3893 w, struct binder_error, work);
3894
3895 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003896 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07003897 if (put_user(e->cmd, (uint32_t __user *)ptr))
3898 return -EFAULT;
			/* save e->cmd before it is reset so the
			 * binder_stat_br() below counts the real command
			 */
			cmd = e->cmd;
3899			e->cmd = BR_OK;
3900 ptr += sizeof(uint32_t);
3901
3902 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07003903 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003904 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003905 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003906 cmd = BR_TRANSACTION_COMPLETE;
3907 if (put_user(cmd, (uint32_t __user *)ptr))
3908 return -EFAULT;
3909 ptr += sizeof(uint32_t);
3910
3911 binder_stat_br(proc, thread, cmd);
3912 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303913 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003914 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003915 kfree(w);
3916 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3917 } break;
3918 case BINDER_WORK_NODE: {
3919 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07003920 int strong, weak;
3921 binder_uintptr_t node_ptr = node->ptr;
3922 binder_uintptr_t node_cookie = node->cookie;
3923 int node_debug_id = node->debug_id;
3924 int has_weak_ref;
3925 int has_strong_ref;
3926 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09003927
Todd Kjos60792612017-05-24 10:51:01 -07003928 BUG_ON(proc != node->proc);
3929 strong = node->internal_strong_refs ||
3930 node->local_strong_refs;
3931 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07003932 node->local_weak_refs ||
3933 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07003934 has_strong_ref = node->has_strong_ref;
3935 has_weak_ref = node->has_weak_ref;
3936
3937 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003938 node->has_weak_ref = 1;
3939 node->pending_weak_ref = 1;
3940 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003941 }
3942 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003943 node->has_strong_ref = 1;
3944 node->pending_strong_ref = 1;
3945 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003946 }
3947 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003948 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003949 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003950 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003951 if (!weak && !strong) {
3952 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3953 "%d:%d node %d u%016llx c%016llx deleted\n",
3954 proc->pid, thread->pid,
3955 node_debug_id,
3956 (u64)node_ptr,
3957 (u64)node_cookie);
3958 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003959 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003960 binder_node_lock(node);
3961 /*
3962 * Acquire the node lock before freeing the
3963 * node to serialize with other threads that
3964 * may have been holding the node lock while
3965 * decrementing this node (avoids race where
3966 * this thread frees while the other thread
3967 * is unlocking the node after the final
3968 * decrement)
3969 */
3970 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003971 binder_free_node(node);
3972 } else
3973 binder_inner_proc_unlock(proc);
3974
Todd Kjos60792612017-05-24 10:51:01 -07003975 if (weak && !has_weak_ref)
3976 ret = binder_put_node_cmd(
3977 proc, thread, &ptr, node_ptr,
3978 node_cookie, node_debug_id,
3979 BR_INCREFS, "BR_INCREFS");
3980 if (!ret && strong && !has_strong_ref)
3981 ret = binder_put_node_cmd(
3982 proc, thread, &ptr, node_ptr,
3983 node_cookie, node_debug_id,
3984 BR_ACQUIRE, "BR_ACQUIRE");
3985 if (!ret && !strong && has_strong_ref)
3986 ret = binder_put_node_cmd(
3987 proc, thread, &ptr, node_ptr,
3988 node_cookie, node_debug_id,
3989 BR_RELEASE, "BR_RELEASE");
3990 if (!ret && !weak && has_weak_ref)
3991 ret = binder_put_node_cmd(
3992 proc, thread, &ptr, node_ptr,
3993 node_cookie, node_debug_id,
3994 BR_DECREFS, "BR_DECREFS");
3995 if (orig_ptr == ptr)
3996 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3997 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3998 proc->pid, thread->pid,
3999 node_debug_id,
4000 (u64)node_ptr,
4001 (u64)node_cookie);
4002 if (ret)
4003 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004004 } break;
4005 case BINDER_WORK_DEAD_BINDER:
4006 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4007 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4008 struct binder_ref_death *death;
4009 uint32_t cmd;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004010 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004011
4012 death = container_of(w, struct binder_ref_death, work);
4013 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4014 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4015 else
4016 cmd = BR_DEAD_BINDER;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004017 cookie = death->cookie;
4018
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004019 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004020 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004021 proc->pid, thread->pid,
4022 cmd == BR_DEAD_BINDER ?
4023 "BR_DEAD_BINDER" :
4024 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Martijn Coenenf9eac642017-05-22 11:26:23 -07004025 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004026 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Martijn Coenenf9eac642017-05-22 11:26:23 -07004027 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004028 kfree(death);
4029 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004030 } else {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004031 binder_enqueue_work_ilocked(
4032 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004033 binder_inner_proc_unlock(proc);
4034 }
Martijn Coenenf9eac642017-05-22 11:26:23 -07004035 if (put_user(cmd, (uint32_t __user *)ptr))
4036 return -EFAULT;
4037 ptr += sizeof(uint32_t);
4038 if (put_user(cookie,
4039 (binder_uintptr_t __user *)ptr))
4040 return -EFAULT;
4041 ptr += sizeof(binder_uintptr_t);
4042 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004043 if (cmd == BR_DEAD_BINDER)
4044 goto done; /* DEAD_BINDER notifications can cause transactions */
4045 } break;
4046 }
4047
4048 if (!t)
4049 continue;
4050
4051 BUG_ON(t->buffer == NULL);
4052 if (t->buffer->target_node) {
4053 struct binder_node *target_node = t->buffer->target_node;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004054 struct binder_priority prio = t->priority;
Seunghun Lee10f62862014-05-01 01:30:23 +09004055
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004056 tr.target.ptr = target_node->ptr;
4057 tr.cookie = target_node->cookie;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004058 t->saved_priority.sched_policy = current->policy;
4059 t->saved_priority.prio = current->normal_prio;
Martijn Coenen6aac9792017-06-07 09:29:14 -07004060 if (target_node->min_priority < t->priority.prio ||
4061 (target_node->min_priority == t->priority.prio &&
4062 target_node->sched_policy == SCHED_FIFO)) {
4063 /*
4064 * In case the minimum priority on the node is
4065 * higher (lower value), use that priority. If
4066 * the priority is the same, but the node uses
4067 * SCHED_FIFO, prefer SCHED_FIFO, since it can
4068 * run unbounded, unlike SCHED_RR.
4069 */
4070 prio.sched_policy = target_node->sched_policy;
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004071 prio.prio = target_node->min_priority;
4072 }
4073 binder_set_priority(current, prio);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004074 cmd = BR_TRANSACTION;
4075 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08004076 tr.target.ptr = 0;
4077 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004078 cmd = BR_REPLY;
4079 }
4080 tr.code = t->code;
4081 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06004082 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004083
Todd Kjos2f993e22017-05-12 14:42:55 -07004084 t_from = binder_get_txn_from(t);
4085 if (t_from) {
4086 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09004087
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004088 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08004089 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004090 } else {
4091 tr.sender_pid = 0;
4092 }
4093
4094 tr.data_size = t->buffer->data_size;
4095 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07004096 tr.data.ptr.buffer = (binder_uintptr_t)
4097 ((uintptr_t)t->buffer->data +
4098 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004099 tr.data.ptr.offsets = tr.data.ptr.buffer +
4100 ALIGN(t->buffer->data_size,
4101 sizeof(void *));
4102
Todd Kjos2f993e22017-05-12 14:42:55 -07004103 if (put_user(cmd, (uint32_t __user *)ptr)) {
4104 if (t_from)
4105 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004106 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004107 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004108 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07004109 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4110 if (t_from)
4111 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004112 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07004113 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004114 ptr += sizeof(tr);
4115
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004116 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004117 binder_stat_br(proc, thread, cmd);
4118 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004119 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004120 proc->pid, thread->pid,
4121 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4122 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07004123 t->debug_id, t_from ? t_from->proc->pid : 0,
4124 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004125 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004126 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004127
Todd Kjos2f993e22017-05-12 14:42:55 -07004128 if (t_from)
4129 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004130 t->buffer->allow_user_free = 1;
4131 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen995a36e2017-06-02 13:36:52 -07004132 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004133 t->to_parent = thread->transaction_stack;
4134 t->to_thread = thread;
4135 thread->transaction_stack = t;
Martijn Coenen995a36e2017-06-02 13:36:52 -07004136 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004137 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07004138 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004139 }
4140 break;
4141 }
4142
4143done:
4144
4145 *consumed = ptr - buffer;
Todd Kjosd600e902017-05-25 17:35:02 -07004146 binder_inner_proc_lock(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004147 if (proc->requested_threads == 0 &&
4148 list_empty(&thread->proc->waiting_threads) &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004149 proc->requested_threads_started < proc->max_threads &&
4150 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4151	     (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4152	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
4153 proc->requested_threads++;
Todd Kjosd600e902017-05-25 17:35:02 -07004154 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004155 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304156 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004157 proc->pid, thread->pid);
4158 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4159 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07004160 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Todd Kjosd600e902017-05-25 17:35:02 -07004161 } else
4162 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004163 return 0;
4164}
4165
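/*
 * binder_release_work() - flush a work list that can no longer run
 * Sends BR_DEAD_REPLY for undelivered two-way transactions and frees
 * (or just logs, for thread-embedded errors) the remaining items.
 */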
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004166static void binder_release_work(struct binder_proc *proc,
4167 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004168{
4169 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09004170
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004171 while (1) {
4172 w = binder_dequeue_work_head(proc, list);
4173 if (!w)
4174 return;
4175
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004176 switch (w->type) {
4177 case BINDER_WORK_TRANSACTION: {
4178 struct binder_transaction *t;
4179
4180 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004181 if (t->buffer->target_node &&
4182 !(t->flags & TF_ONE_WAY)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004183 binder_send_failed_reply(t, BR_DEAD_REPLY);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004184 } else {
4185 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304186 "undelivered transaction %d\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004187 t->debug_id);
Todd Kjos21ef40a2017-03-30 18:02:13 -07004188 binder_free_transaction(t);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004189 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004190 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004191 case BINDER_WORK_RETURN_ERROR: {
4192 struct binder_error *e = container_of(
4193 w, struct binder_error, work);
4194
4195 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4196 "undelivered TRANSACTION_ERROR: %u\n",
4197 e->cmd);
4198 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004199 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004200 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304201 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004202 kfree(w);
4203 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4204 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004205 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4206 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4207 struct binder_ref_death *death;
4208
4209 death = container_of(w, struct binder_ref_death, work);
4210 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004211 "undelivered death notification, %016llx\n",
4212 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004213 kfree(death);
4214 binder_stats_deleted(BINDER_STAT_DEATH);
4215 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004216 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304217 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004218 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004219 break;
4220 }
4221 }
4222
4223}
4224
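/*
 * binder_get_thread_ilocked() - look up the calling thread's state
 * Walks proc->threads by current->pid; when new_thread is non-NULL it
 * is initialized and inserted into the rb-tree on a miss.  Caller must
 * hold the inner proc lock.
 */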
Todd Kjosb4827902017-05-25 15:52:17 -07004225static struct binder_thread *binder_get_thread_ilocked(
4226 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004227{
4228 struct binder_thread *thread = NULL;
4229 struct rb_node *parent = NULL;
4230 struct rb_node **p = &proc->threads.rb_node;
4231
4232 while (*p) {
4233 parent = *p;
4234 thread = rb_entry(parent, struct binder_thread, rb_node);
4235
4236 if (current->pid < thread->pid)
4237 p = &(*p)->rb_left;
4238 else if (current->pid > thread->pid)
4239 p = &(*p)->rb_right;
4240 else
Todd Kjosb4827902017-05-25 15:52:17 -07004241 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004242 }
Todd Kjosb4827902017-05-25 15:52:17 -07004243 if (!new_thread)
4244 return NULL;
4245 thread = new_thread;
4246 binder_stats_created(BINDER_STAT_THREAD);
4247 thread->proc = proc;
4248 thread->pid = current->pid;
4249 atomic_set(&thread->tmp_ref, 0);
4250 init_waitqueue_head(&thread->wait);
4251 INIT_LIST_HEAD(&thread->todo);
4252 rb_link_node(&thread->rb_node, parent, p);
4253 rb_insert_color(&thread->rb_node, &proc->threads);
4254 thread->looper_need_return = true;
4255 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4256 thread->return_error.cmd = BR_OK;
4257 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4258 thread->reply_error.cmd = BR_OK;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004259 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004260 return thread;
4261}
4262
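/*
 * binder_get_thread() - find or create binder state for this thread
 * Fast path is a locked lookup; on a miss the binder_thread is
 * allocated outside the lock, the lookup is retried, and the new
 * allocation is freed if another caller raced us in.
 */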
4263static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4264{
4265 struct binder_thread *thread;
4266 struct binder_thread *new_thread;
4267
4268 binder_inner_proc_lock(proc);
4269 thread = binder_get_thread_ilocked(proc, NULL);
4270 binder_inner_proc_unlock(proc);
4271 if (!thread) {
4272 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4273 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004274 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07004275 binder_inner_proc_lock(proc);
4276 thread = binder_get_thread_ilocked(proc, new_thread);
4277 binder_inner_proc_unlock(proc);
4278 if (thread != new_thread)
4279 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004280 }
4281 return thread;
4282}
4283
Todd Kjos2f993e22017-05-12 14:42:55 -07004284static void binder_free_proc(struct binder_proc *proc)
4285{
4286 BUG_ON(!list_empty(&proc->todo));
4287 BUG_ON(!list_empty(&proc->delivered_death));
4288 binder_alloc_deferred_release(&proc->alloc);
4289 put_task_struct(proc->tsk);
4290 binder_stats_deleted(BINDER_STAT_PROC);
4291 kfree(proc);
4292}
4293
4294static void binder_free_thread(struct binder_thread *thread)
4295{
4296 BUG_ON(!list_empty(&thread->todo));
4297 binder_stats_deleted(BINDER_STAT_THREAD);
4298 binder_proc_dec_tmpref(thread->proc);
4299 kfree(thread);
4300}
4301
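/*
 * binder_thread_release() - tear down a thread on exit or proc release
 * Removes the thread from proc->threads, unwinds every transaction on
 * its stack (dead-replying a reply the caller still expects), releases
 * queued work and drops the temporary refs taken here.  Returns the
 * number of transactions that were still active.
 */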
4302static int binder_thread_release(struct binder_proc *proc,
4303 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004304{
4305 struct binder_transaction *t;
4306 struct binder_transaction *send_reply = NULL;
4307 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07004308 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004309
Todd Kjosb4827902017-05-25 15:52:17 -07004310 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004311 /*
4312 * take a ref on the proc so it survives
4313 * after we remove this thread from proc->threads.
4314 * The corresponding dec is when we actually
4315 * free the thread in binder_free_thread()
4316 */
4317 proc->tmp_ref++;
4318 /*
4319 * take a ref on this thread to ensure it
4320 * survives while we are releasing it
4321 */
4322 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004323 rb_erase(&thread->rb_node, &proc->threads);
4324 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07004325 if (t) {
4326 spin_lock(&t->lock);
4327 if (t->to_thread == thread)
4328 send_reply = t;
4329 }
4330 thread->is_dead = true;
4331
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004332 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07004333 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004334 active_transactions++;
4335 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304336 "release %d:%d transaction %d %s, still active\n",
4337 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004338 t->debug_id,
4339 (t->to_thread == thread) ? "in" : "out");
4340
4341 if (t->to_thread == thread) {
4342 t->to_proc = NULL;
4343 t->to_thread = NULL;
4344 if (t->buffer) {
4345 t->buffer->transaction = NULL;
4346 t->buffer = NULL;
4347 }
4348 t = t->to_parent;
4349 } else if (t->from == thread) {
4350 t->from = NULL;
4351 t = t->from_parent;
4352 } else
4353 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07004354 spin_unlock(&last_t->lock);
4355 if (t)
4356 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004357 }
Todd Kjosb4827902017-05-25 15:52:17 -07004358 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004359
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004360 if (send_reply)
4361 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004362 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004363 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004364 return active_transactions;
4365}
4366
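/*
 * binder_poll() - poll support for the binder fd
 * Reports POLLIN when the thread (or, for an available looper thread,
 * the process) has pending work; otherwise registers thread->wait with
 * the poll table.
 */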
4367static unsigned int binder_poll(struct file *filp,
4368 struct poll_table_struct *wait)
4369{
4370 struct binder_proc *proc = filp->private_data;
4371 struct binder_thread *thread = NULL;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004372 bool wait_for_proc_work;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004373
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004374	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;
4375
Martijn Coenen995a36e2017-06-02 13:36:52 -07004376 binder_inner_proc_lock(thread->proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004377 thread->looper |= BINDER_LOOPER_STATE_POLL;
4378 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4379
Martijn Coenen995a36e2017-06-02 13:36:52 -07004380 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004381
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004382 if (binder_has_work(thread, wait_for_proc_work))
4383 return POLLIN;
4384
4385 poll_wait(filp, &thread->wait, wait);
4386
4387 if (binder_has_thread_work(thread))
4388 return POLLIN;
4389
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004390 return 0;
4391}
4392
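/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * Copies in a struct binder_write_read, runs the write buffer before
 * the read buffer, and copies the consumed counts back to user space
 * even when one of the phases fails, so the caller can see how far
 * processing got.
 */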
Tair Rzayev78260ac2014-06-03 22:27:21 +03004393static int binder_ioctl_write_read(struct file *filp,
4394 unsigned int cmd, unsigned long arg,
4395 struct binder_thread *thread)
4396{
4397 int ret = 0;
4398 struct binder_proc *proc = filp->private_data;
4399 unsigned int size = _IOC_SIZE(cmd);
4400 void __user *ubuf = (void __user *)arg;
4401 struct binder_write_read bwr;
4402
4403 if (size != sizeof(struct binder_write_read)) {
4404 ret = -EINVAL;
4405 goto out;
4406 }
4407 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4408 ret = -EFAULT;
4409 goto out;
4410 }
4411 binder_debug(BINDER_DEBUG_READ_WRITE,
4412 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4413 proc->pid, thread->pid,
4414 (u64)bwr.write_size, (u64)bwr.write_buffer,
4415 (u64)bwr.read_size, (u64)bwr.read_buffer);
4416
4417 if (bwr.write_size > 0) {
4418 ret = binder_thread_write(proc, thread,
4419 bwr.write_buffer,
4420 bwr.write_size,
4421 &bwr.write_consumed);
4422 trace_binder_write_done(ret);
4423 if (ret < 0) {
4424 bwr.read_consumed = 0;
4425 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4426 ret = -EFAULT;
4427 goto out;
4428 }
4429 }
4430 if (bwr.read_size > 0) {
4431 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4432 bwr.read_size,
4433 &bwr.read_consumed,
4434 filp->f_flags & O_NONBLOCK);
4435 trace_binder_read_done(ret);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004436 binder_inner_proc_lock(proc);
4437 if (!binder_worklist_empty_ilocked(&proc->todo))
Martijn Coenen053be422017-06-06 15:17:46 -07004438 binder_wakeup_proc_ilocked(proc);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004439 binder_inner_proc_unlock(proc);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004440 if (ret < 0) {
4441 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4442 ret = -EFAULT;
4443 goto out;
4444 }
4445 }
4446 binder_debug(BINDER_DEBUG_READ_WRITE,
4447 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4448 proc->pid, thread->pid,
4449 (u64)bwr.write_consumed, (u64)bwr.write_size,
4450 (u64)bwr.read_consumed, (u64)bwr.read_size);
4451 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4452 ret = -EFAULT;
4453 goto out;
4454 }
4455out:
4456 return ret;
4457}
4458
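/*
 * binder_ioctl_set_ctx_mgr() - register the context manager node
 * Fails with -EBUSY if a manager is already registered, runs the
 * security hook, and enforces that the caller's euid matches any
 * previously configured manager uid before allocating the manager
 * node with one strong and one weak reference held.
 */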
4459static int binder_ioctl_set_ctx_mgr(struct file *filp)
4460{
4461 int ret = 0;
4462 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004463 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004464 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004465 kuid_t curr_euid = current_euid();
4466
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004467 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004468 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004469 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4470 ret = -EBUSY;
4471 goto out;
4472 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004473 ret = security_binder_set_context_mgr(proc->tsk);
4474 if (ret < 0)
4475 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004476 if (uid_valid(context->binder_context_mgr_uid)) {
4477 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004478 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4479 from_kuid(&init_user_ns, curr_euid),
4480 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004481 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004482 ret = -EPERM;
4483 goto out;
4484 }
4485 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004486 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004487 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004488 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004489 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004490 ret = -ENOMEM;
4491 goto out;
4492 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004493 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004494 new_node->local_weak_refs++;
4495 new_node->local_strong_refs++;
4496 new_node->has_strong_ref = 1;
4497 new_node->has_weak_ref = 1;
4498 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004499 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004500 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004501out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004502 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004503 return ret;
4504}
4505
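/*
 * binder_ioctl() - top-level command dispatcher for the binder fd
 * Blocks while binder_stop_on_user_error is raised, binds the calling
 * thread, then handles BINDER_WRITE_READ, BINDER_SET_MAX_THREADS,
 * BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT and BINDER_VERSION.
 */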
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004506static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4507{
4508 int ret;
4509 struct binder_proc *proc = filp->private_data;
4510 struct binder_thread *thread;
4511 unsigned int size = _IOC_SIZE(cmd);
4512 void __user *ubuf = (void __user *)arg;
4513
Tair Rzayev78260ac2014-06-03 22:27:21 +03004514 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4515 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004516
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004517 trace_binder_ioctl(cmd, arg);
4518
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004519 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4520 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004521 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004522
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004523 thread = binder_get_thread(proc);
4524 if (thread == NULL) {
4525 ret = -ENOMEM;
4526 goto err;
4527 }
4528
4529 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004530 case BINDER_WRITE_READ:
4531 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4532 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004533 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004534 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004535 case BINDER_SET_MAX_THREADS: {
4536 int max_threads;
4537
4538 if (copy_from_user(&max_threads, ubuf,
4539 sizeof(max_threads))) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004540 ret = -EINVAL;
4541 goto err;
4542 }
Todd Kjosd600e902017-05-25 17:35:02 -07004543 binder_inner_proc_lock(proc);
4544 proc->max_threads = max_threads;
4545 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004546 break;
Todd Kjosd600e902017-05-25 17:35:02 -07004547 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004548 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004549 ret = binder_ioctl_set_ctx_mgr(filp);
4550 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004551 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004552 break;
4553 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304554 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004555 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004556 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004557 thread = NULL;
4558 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004559 case BINDER_VERSION: {
4560 struct binder_version __user *ver = ubuf;
4561
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004562 if (size != sizeof(struct binder_version)) {
4563 ret = -EINVAL;
4564 goto err;
4565 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004566 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4567 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004568 ret = -EINVAL;
4569 goto err;
4570 }
4571 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004572 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004573 default:
4574 ret = -EINVAL;
4575 goto err;
4576 }
4577 ret = 0;
4578err:
4579 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004580 thread->looper_need_return = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004581 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4582 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304583 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004584err_unlocked:
4585 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004586 return ret;
4587}
4588
4589static void binder_vma_open(struct vm_area_struct *vma)
4590{
4591 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004592
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004593 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304594 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004595 proc->pid, vma->vm_start, vma->vm_end,
4596 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4597 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004598}
4599
4600static void binder_vma_close(struct vm_area_struct *vma)
4601{
4602 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004603
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004604 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304605 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004606 proc->pid, vma->vm_start, vma->vm_end,
4607 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4608 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004609 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004610 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4611}
4612
Vinayak Menonddac7d52014-06-02 18:17:59 +05304613static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4614{
4615 return VM_FAULT_SIGBUS;
4616}
4617
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004618static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004619 .open = binder_vma_open,
4620 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304621 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004622};
4623
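/*
 * binder_mmap() - map the per-process binder buffer area
 * Only the process that opened the fd may map it; the mapping is
 * capped at 4MB, writable/copy-on-fork mappings are rejected via
 * FORBIDDEN_MMAP_FLAGS, and the VMA is handed to the binder allocator.
 */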
Todd Kjosd325d372016-10-10 10:40:53 -07004624static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4625{
4626 int ret;
4627 struct binder_proc *proc = filp->private_data;
4628 const char *failure_string;
4629
4630 if (proc->tsk != current->group_leader)
4631 return -EINVAL;
4632
4633 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4634 vma->vm_end = vma->vm_start + SZ_4M;
4635
4636 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4637 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4638 __func__, proc->pid, vma->vm_start, vma->vm_end,
4639 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4640 (unsigned long)pgprot_val(vma->vm_page_prot));
4641
4642 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4643 ret = -EPERM;
4644 failure_string = "bad vm_flags";
4645 goto err_bad_arg;
4646 }
4647 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4648 vma->vm_ops = &binder_vm_ops;
4649 vma->vm_private_data = proc;
4650
4651 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4652 if (ret)
4653 return ret;
4654 proc->files = get_files_struct(current);
4655 return 0;
4656
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004657err_bad_arg:
Sherwin Soltani258767f2012-06-26 02:00:30 -04004658 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004659 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4660 return ret;
4661}
4662
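/*
 * binder_open() - set up a binder_proc for a new opener
 * State is keyed to current->group_leader; the caller's scheduling
 * policy becomes the default transaction priority when supported,
 * falling back to SCHED_NORMAL/nice 0, and a debugfs entry named
 * after the pid is created when the proc directory exists.
 */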
4663static int binder_open(struct inode *nodp, struct file *filp)
4664{
4665 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004666 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004667
4668 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4669 current->group_leader->pid, current->pid);
4670
4671 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4672 if (proc == NULL)
4673 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004674 spin_lock_init(&proc->inner_lock);
4675 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004676 get_task_struct(current->group_leader);
4677 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004678 INIT_LIST_HEAD(&proc->todo);
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004679 if (binder_supported_policy(current->policy)) {
4680 proc->default_priority.sched_policy = current->policy;
4681 proc->default_priority.prio = current->normal_prio;
4682 } else {
4683 proc->default_priority.sched_policy = SCHED_NORMAL;
4684 proc->default_priority.prio = NICE_TO_PRIO(0);
4685 }
4686
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004687 binder_dev = container_of(filp->private_data, struct binder_device,
4688 miscdev);
4689 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07004690 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004691
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004692 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004693 proc->pid = current->group_leader->pid;
4694 INIT_LIST_HEAD(&proc->delivered_death);
Martijn Coenen22d64e4322017-06-02 11:15:44 -07004695 INIT_LIST_HEAD(&proc->waiting_threads);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004696 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004697
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004698 mutex_lock(&binder_procs_lock);
4699 hlist_add_head(&proc->proc_node, &binder_procs);
4700 mutex_unlock(&binder_procs_lock);
4701
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004702 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004703 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09004704
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004705 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004706 /*
4707 * proc debug entries are shared between contexts, so
4708 * this will fail if the process tries to open the driver
4709			 * again with a different context. The printing code will
4710			 * print all contexts that a given PID has anyway, so this
4711			 * is not a problem.
4712 */
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004713 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004714 binder_debugfs_dir_entry_proc,
4715 (void *)(unsigned long)proc->pid,
4716 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004717 }
4718
4719 return 0;
4720}
4721
4722static int binder_flush(struct file *filp, fl_owner_t id)
4723{
4724 struct binder_proc *proc = filp->private_data;
4725
4726 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4727
4728 return 0;
4729}
4730
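/*
 * binder_deferred_flush() - force readers back to user space on flush
 * Sets looper_need_return on every thread and wakes the ones currently
 * sleeping in binder_thread_read().
 */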
4731static void binder_deferred_flush(struct binder_proc *proc)
4732{
4733 struct rb_node *n;
4734 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09004735
Todd Kjosb4827902017-05-25 15:52:17 -07004736 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004737 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4738 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09004739
Todd Kjos6798e6d2017-01-06 14:19:25 -08004740 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004741 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4742 wake_up_interruptible(&thread->wait);
4743 wake_count++;
4744 }
4745 }
Todd Kjosb4827902017-05-25 15:52:17 -07004746 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004747
4748 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4749 "binder_flush: %d woke %d threads\n", proc->pid,
4750 wake_count);
4751}
4752
4753static int binder_release(struct inode *nodp, struct file *filp)
4754{
4755 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004756
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004757 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004758 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4759
4760 return 0;
4761}
4762
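/*
 * binder_node_release() - detach a node from its dying process
 * Frees the node outright when nothing else references it; otherwise
 * moves it onto binder_dead_nodes and queues BINDER_WORK_DEAD_BINDER
 * for every ref that asked for a death notification.  Returns the
 * updated incoming-ref tally.
 */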
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004763static int binder_node_release(struct binder_node *node, int refs)
4764{
4765 struct binder_ref *ref;
4766 int death = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004767 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004768
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004769 binder_release_work(proc, &node->async_todo);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004770
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004771 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004772 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004773 binder_dequeue_work_ilocked(&node->work);
Todd Kjosf22abc72017-05-09 11:08:05 -07004774 /*
4775	 * The caller must have taken a temporary ref on the node.
4776 */
4777 BUG_ON(!node->tmp_refs);
4778 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004779 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004780 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004781 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004782
4783 return refs;
4784 }
4785
4786 node->proc = NULL;
4787 node->local_strong_refs = 0;
4788 node->local_weak_refs = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004789 binder_inner_proc_unlock(proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004790
4791 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004792 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004793 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004794
4795 hlist_for_each_entry(ref, &node->refs, node_entry) {
4796 refs++;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004797 /*
4798 * Need the node lock to synchronize
4799 * with new notification requests and the
4800 * inner lock to synchronize with queued
4801 * death notifications.
4802 */
4803 binder_inner_proc_lock(ref->proc);
4804 if (!ref->death) {
4805 binder_inner_proc_unlock(ref->proc);
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08004806 continue;
Martijn Coenenf9eac642017-05-22 11:26:23 -07004807 }
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004808
4809 death++;
4810
Martijn Coenenf9eac642017-05-22 11:26:23 -07004811 BUG_ON(!list_empty(&ref->death->work.entry));
4812 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4813 binder_enqueue_work_ilocked(&ref->death->work,
4814 &ref->proc->todo);
Martijn Coenen053be422017-06-06 15:17:46 -07004815 binder_wakeup_proc_ilocked(ref->proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004816 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004817 }
4818
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004819 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4820 "node %d now dead, refs %d, death %d\n",
4821 node->debug_id, refs, death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004822 binder_node_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004823 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004824
4825 return refs;
4826}
4827
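/*
 * binder_deferred_release() - deferred half of binder_release()
 * Unregisters the proc (clearing the context manager node if this
 * process owned it), releases every thread, node and ref, flushes the
 * remaining work lists, and drops the temporary proc ref so
 * binder_free_proc() runs once all transactions complete.
 */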
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004828static void binder_deferred_release(struct binder_proc *proc)
4829{
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004830 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004831 struct rb_node *n;
Todd Kjosd325d372016-10-10 10:40:53 -07004832 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004833
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004834 BUG_ON(proc->files);
4835
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004836 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004837 hlist_del(&proc->proc_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004838 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004839
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004840 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004841 if (context->binder_context_mgr_node &&
4842 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004843 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004844 "%s: %d context_mgr_node gone\n",
4845 __func__, proc->pid);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004846 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004847 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004848 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjosb4827902017-05-25 15:52:17 -07004849 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004850 /*
4851 * Make sure proc stays alive after we
4852 * remove all the threads
4853 */
4854 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004855
Todd Kjos2f993e22017-05-12 14:42:55 -07004856 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004857 threads = 0;
4858 active_transactions = 0;
4859 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004860 struct binder_thread *thread;
4861
4862 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004863 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004864 threads++;
Todd Kjos2f993e22017-05-12 14:42:55 -07004865 active_transactions += binder_thread_release(proc, thread);
Todd Kjosb4827902017-05-25 15:52:17 -07004866 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004867 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004868
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004869 nodes = 0;
4870 incoming_refs = 0;
4871 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004872 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004873
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004874 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004875 nodes++;
Todd Kjosf22abc72017-05-09 11:08:05 -07004876 /*
4877 * take a temporary ref on the node before
4878 * calling binder_node_release() which will either
4879 * kfree() the node or call binder_put_node()
4880 */
Todd Kjos425d23f2017-06-12 12:07:26 -07004881 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004882 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjos425d23f2017-06-12 12:07:26 -07004883 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004884 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjos425d23f2017-06-12 12:07:26 -07004885 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004886 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004887 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004888
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004889 outgoing_refs = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07004890 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004891 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004892 struct binder_ref *ref;
4893
4894 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004895 outgoing_refs++;
Todd Kjos5346bf32016-10-20 16:43:34 -07004896 binder_cleanup_ref_olocked(ref);
4897 binder_proc_unlock(proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07004898 binder_free_ref(ref);
Todd Kjos5346bf32016-10-20 16:43:34 -07004899 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004900 }
Todd Kjos5346bf32016-10-20 16:43:34 -07004901 binder_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004902
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004903 binder_release_work(proc, &proc->todo);
4904 binder_release_work(proc, &proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004905
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004906 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjosd325d372016-10-10 10:40:53 -07004907 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004908 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjosd325d372016-10-10 10:40:53 -07004909 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004910
Todd Kjos2f993e22017-05-12 14:42:55 -07004911 binder_proc_dec_tmpref(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004912}
4913
4914static void binder_deferred_func(struct work_struct *work)
4915{
4916 struct binder_proc *proc;
4917 struct files_struct *files;
4918
4919 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09004920
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004921 do {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004922 mutex_lock(&binder_deferred_lock);
4923 if (!hlist_empty(&binder_deferred_list)) {
4924 proc = hlist_entry(binder_deferred_list.first,
4925 struct binder_proc, deferred_work_node);
4926 hlist_del_init(&proc->deferred_work_node);
4927 defer = proc->deferred_work;
4928 proc->deferred_work = 0;
4929 } else {
4930 proc = NULL;
4931 defer = 0;
4932 }
4933 mutex_unlock(&binder_deferred_lock);
4934
4935 files = NULL;
4936 if (defer & BINDER_DEFERRED_PUT_FILES) {
4937 files = proc->files;
4938 if (files)
4939 proc->files = NULL;
4940 }
4941
4942 if (defer & BINDER_DEFERRED_FLUSH)
4943 binder_deferred_flush(proc);
4944
4945 if (defer & BINDER_DEFERRED_RELEASE)
4946 binder_deferred_release(proc); /* frees proc */
4947
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004948 if (files)
4949 put_files_struct(files);
4950 } while (proc);
4951}
4952static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4953
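/*
 * binder_defer_work() - queue deferred work for a proc
 * ORs the requested flags into proc->deferred_work and schedules the
 * shared work item the first time this proc is queued.
 */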
4954static void
4955binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4956{
4957 mutex_lock(&binder_deferred_lock);
4958 proc->deferred_work |= defer;
4959 if (hlist_unhashed(&proc->deferred_work_node)) {
4960 hlist_add_head(&proc->deferred_work_node,
4961 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05304962 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004963 }
4964 mutex_unlock(&binder_deferred_lock);
4965}
4966
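/*
 * print_binder_transaction_ilocked() - dump one transaction to debugfs
 * Snapshots t->to_proc under t->lock; buffer details are printed only
 * when the caller holds the inner lock of the proc that owns them.
 */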
Todd Kjos6d241a42017-04-21 14:32:11 -07004967static void print_binder_transaction_ilocked(struct seq_file *m,
4968 struct binder_proc *proc,
4969 const char *prefix,
4970 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004971{
Todd Kjos6d241a42017-04-21 14:32:11 -07004972 struct binder_proc *to_proc;
4973 struct binder_buffer *buffer = t->buffer;
4974
4975 WARN_ON(!spin_is_locked(&proc->inner_lock));
Todd Kjos2f993e22017-05-12 14:42:55 -07004976 spin_lock(&t->lock);
Todd Kjos6d241a42017-04-21 14:32:11 -07004977 to_proc = t->to_proc;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004978 seq_printf(m,
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004979 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004980 prefix, t->debug_id, t,
4981 t->from ? t->from->proc->pid : 0,
4982 t->from ? t->from->pid : 0,
Todd Kjos6d241a42017-04-21 14:32:11 -07004983 to_proc ? to_proc->pid : 0,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004984 t->to_thread ? t->to_thread->pid : 0,
Martijn Coenen57b2ac62017-06-06 17:04:42 -07004985 t->code, t->flags, t->priority.sched_policy,
4986 t->priority.prio, t->need_reply);
Todd Kjos2f993e22017-05-12 14:42:55 -07004987 spin_unlock(&t->lock);
4988
Todd Kjos6d241a42017-04-21 14:32:11 -07004989 if (proc != to_proc) {
4990 /*
4991 * Can only safely deref buffer if we are holding the
4992 * correct proc inner lock for this node
4993 */
4994 seq_puts(m, "\n");
4995 return;
4996 }
4997
4998 if (buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004999 seq_puts(m, " buffer free\n");
5000 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005001 }
Todd Kjos6d241a42017-04-21 14:32:11 -07005002 if (buffer->target_node)
5003 seq_printf(m, " node %d", buffer->target_node->debug_id);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005004 seq_printf(m, " size %zd:%zd data %p\n",
Todd Kjos6d241a42017-04-21 14:32:11 -07005005 buffer->data_size, buffer->offsets_size,
5006 buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005007}
5008
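/*
 * print_binder_work_ilocked() - dump one queued work item to debugfs
 * Pretty-prints each BINDER_WORK_* type, deferring transactions to
 * print_binder_transaction_ilocked().
 */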
Todd Kjos6d241a42017-04-21 14:32:11 -07005009static void print_binder_work_ilocked(struct seq_file *m,
5010 struct binder_proc *proc,
5011 const char *prefix,
5012 const char *transaction_prefix,
5013 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005014{
5015 struct binder_node *node;
5016 struct binder_transaction *t;
5017
5018 switch (w->type) {
5019 case BINDER_WORK_TRANSACTION:
5020 t = container_of(w, struct binder_transaction, work);
Todd Kjos6d241a42017-04-21 14:32:11 -07005021 print_binder_transaction_ilocked(
5022 m, proc, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005023 break;
Todd Kjos858b8da2017-04-21 17:35:12 -07005024 case BINDER_WORK_RETURN_ERROR: {
5025 struct binder_error *e = container_of(
5026 w, struct binder_error, work);
5027
5028 seq_printf(m, "%stransaction error: %u\n",
5029 prefix, e->cmd);
5030 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005031 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005032 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005033 break;
5034 case BINDER_WORK_NODE:
5035 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08005036 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5037 prefix, node->debug_id,
5038 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005039 break;
5040 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005041 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005042 break;
5043 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005044 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005045 break;
5046 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005047 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005048 break;
5049 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005050 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005051 break;
5052 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005053}
5054
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005055static void print_binder_thread_ilocked(struct seq_file *m,
5056 struct binder_thread *thread,
5057 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005058{
5059 struct binder_transaction *t;
5060 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005061 size_t start_pos = m->count;
5062 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005063
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005064	lockdep_assert_held(&thread->proc->inner_lock);
Todd Kjos2f993e22017-05-12 14:42:55 -07005065 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
Todd Kjos6798e6d2017-01-06 14:19:25 -08005066 thread->pid, thread->looper,
Todd Kjos2f993e22017-05-12 14:42:55 -07005067 thread->looper_need_return,
5068 atomic_read(&thread->tmp_ref));
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005069 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005070 t = thread->transaction_stack;
5071 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005072 if (t->from == thread) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005073 print_binder_transaction_ilocked(m, thread->proc,
5074 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005075 t = t->from_parent;
5076 } else if (t->to_thread == thread) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005077 print_binder_transaction_ilocked(m, thread->proc,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005078 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005079 t = t->to_parent;
5080 } else {
Todd Kjos6d241a42017-04-21 14:32:11 -07005081 print_binder_transaction_ilocked(m, thread->proc,
5082 " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005083 t = NULL;
5084 }
5085 }
5086 list_for_each_entry(w, &thread->todo, entry) {
Todd Kjos6d241a42017-04-21 14:32:11 -07005087 print_binder_work_ilocked(m, thread->proc, " ",
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005088 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005089 }
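	/*
	 * If only the header line was emitted, rewind the seq_file so an
	 * idle thread produces no output unless print_always is set.
	 */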
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005090 if (!print_always && m->count == header_pos)
5091 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005092}
5093
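/*
 * Dump one node: its ref counts, the procs holding refs on it and, for
 * nodes still attached to a proc, any pending async work.  The asserts
 * below document the locks the caller must already hold.
 */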
Todd Kjos425d23f2017-06-12 12:07:26 -07005094static void print_binder_node_nilocked(struct seq_file *m,
5095 struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005096{
5097 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005098 struct binder_work *w;
5099 int count;
5100
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005101	lockdep_assert_held(&node->lock);
Todd Kjos425d23f2017-06-12 12:07:26 -07005102 if (node->proc)
 5103		lockdep_assert_held(&node->proc->inner_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005104
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005105 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08005106 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005107 count++;
5108
Martijn Coenen6aac9792017-06-07 09:29:14 -07005109 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
Arve Hjønnevågda498892014-02-21 14:40:26 -08005110 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Martijn Coenen6aac9792017-06-07 09:29:14 -07005111 node->sched_policy, node->min_priority,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005112 node->has_strong_ref, node->has_weak_ref,
5113 node->local_strong_refs, node->local_weak_refs,
Todd Kjosf22abc72017-05-09 11:08:05 -07005114 node->internal_strong_refs, count, node->tmp_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005115 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005116 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08005117 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005118 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005119 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005120 seq_puts(m, "\n");
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005121 if (node->proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005122 list_for_each_entry(w, &node->async_todo, entry)
Todd Kjos6d241a42017-04-21 14:32:11 -07005123 print_binder_work_ilocked(m, node->proc, " ",
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005124 " pending async transaction", w);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005125 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005126}
5127
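/*
 * Print one ref while the caller holds the owning proc's outer lock.
 * The node lock is taken here so ref->node->proc can be checked safely
 * to report a "dead" node.
 */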
Todd Kjos5346bf32016-10-20 16:43:34 -07005128static void print_binder_ref_olocked(struct seq_file *m,
5129 struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005130{
Todd Kjos5346bf32016-10-20 16:43:34 -07005131	lockdep_assert_held(&ref->proc->outer_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005132 binder_node_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07005133 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5134 ref->data.debug_id, ref->data.desc,
5135 ref->node->proc ? "" : "dead ",
5136 ref->node->debug_id, ref->data.strong,
5137 ref->data.weak, ref->death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005138 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005139}
5140
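/*
 * Dump a whole proc: threads, nodes, refs, allocated buffers and todo
 * lists.  When @print_all is zero the output is rewound if nothing
 * beyond the "proc"/"context" header lines was printed, mirroring the
 * per-thread rewind above.
 */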
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005141static void print_binder_proc(struct seq_file *m,
5142 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005143{
5144 struct binder_work *w;
5145 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005146 size_t start_pos = m->count;
5147 size_t header_pos;
Todd Kjos425d23f2017-06-12 12:07:26 -07005148 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005149
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005150 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005151 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005152 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005153
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005154 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005155 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005156 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005157 rb_node), print_all);
Todd Kjos425d23f2017-06-12 12:07:26 -07005158
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005159 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005160 struct binder_node *node = rb_entry(n, struct binder_node,
5161 rb_node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005162 /*
5163 * take a temporary reference on the node so it
5164 * survives and isn't removed from the tree
5165 * while we print it.
5166 */
5167 binder_inc_node_tmpref_ilocked(node);
5168 /* Need to drop inner lock to take node lock */
5169 binder_inner_proc_unlock(proc);
5170 if (last_node)
5171 binder_put_node(last_node);
5172 binder_node_inner_lock(node);
5173 print_binder_node_nilocked(m, node);
5174 binder_node_inner_unlock(node);
5175 last_node = node;
5176 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005177 }
Todd Kjos425d23f2017-06-12 12:07:26 -07005178 binder_inner_proc_unlock(proc);
5179 if (last_node)
5180 binder_put_node(last_node);
5181
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005182 if (print_all) {
Todd Kjos5346bf32016-10-20 16:43:34 -07005183 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005184 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005185 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005186 n = rb_next(n))
Todd Kjos5346bf32016-10-20 16:43:34 -07005187 print_binder_ref_olocked(m, rb_entry(n,
5188 struct binder_ref,
5189 rb_node_desc));
5190 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005191 }
Todd Kjosd325d372016-10-10 10:40:53 -07005192 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005193 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005194 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos6d241a42017-04-21 14:32:11 -07005195 print_binder_work_ilocked(m, proc, " ",
5196 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005197 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005198 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005199 break;
5200 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005201 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005202 if (!print_all && m->count == header_pos)
5203 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005204}
5205
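/*
 * Human-readable names for the BR_*/BC_* command values and the object
 * stats.  Order must match the corresponding enums;
 * print_binder_stats() enforces matching array sizes with
 * BUILD_BUG_ON().
 */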
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005206static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005207 "BR_ERROR",
5208 "BR_OK",
5209 "BR_TRANSACTION",
5210 "BR_REPLY",
5211 "BR_ACQUIRE_RESULT",
5212 "BR_DEAD_REPLY",
5213 "BR_TRANSACTION_COMPLETE",
5214 "BR_INCREFS",
5215 "BR_ACQUIRE",
5216 "BR_RELEASE",
5217 "BR_DECREFS",
5218 "BR_ATTEMPT_ACQUIRE",
5219 "BR_NOOP",
5220 "BR_SPAWN_LOOPER",
5221 "BR_FINISHED",
5222 "BR_DEAD_BINDER",
5223 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5224 "BR_FAILED_REPLY"
5225};
5226
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005227static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005228 "BC_TRANSACTION",
5229 "BC_REPLY",
5230 "BC_ACQUIRE_RESULT",
5231 "BC_FREE_BUFFER",
5232 "BC_INCREFS",
5233 "BC_ACQUIRE",
5234 "BC_RELEASE",
5235 "BC_DECREFS",
5236 "BC_INCREFS_DONE",
5237 "BC_ACQUIRE_DONE",
5238 "BC_ATTEMPT_ACQUIRE",
5239 "BC_REGISTER_LOOPER",
5240 "BC_ENTER_LOOPER",
5241 "BC_EXIT_LOOPER",
5242 "BC_REQUEST_DEATH_NOTIFICATION",
5243 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02005244 "BC_DEAD_BINDER_DONE",
5245 "BC_TRANSACTION_SG",
5246 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005247};
5248
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005249static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005250 "proc",
5251 "thread",
5252 "node",
5253 "ref",
5254 "death",
5255 "transaction",
5256 "transaction_complete"
5257};
5258
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005259static void print_binder_stats(struct seq_file *m, const char *prefix,
5260 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005261{
5262 int i;
5263
5264 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005265 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005266 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005267 int temp = atomic_read(&stats->bc[i]);
5268
5269 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005270 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005271 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005272 }
5273
5274 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005275 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005276 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005277 int temp = atomic_read(&stats->br[i]);
5278
5279 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005280 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005281 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005282 }
5283
5284 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005285 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005286 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005287 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005288 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005289 int created = atomic_read(&stats->obj_created[i]);
5290 int deleted = atomic_read(&stats->obj_deleted[i]);
5291
5292 if (created || deleted)
5293 seq_printf(m, "%s%s: active %d total %d\n",
5294 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005295 binder_objstat_strings[i],
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07005296 created - deleted,
5297 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005298 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005299}
5300
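/*
 * Per-proc statistics summary.  "requested threads: N+M/K" reads as:
 * N spawn requests outstanding, M looper threads started so far, and K
 * the limit set via the BINDER_SET_MAX_THREADS ioctl.
 */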
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005301static void print_binder_proc_stats(struct seq_file *m,
5302 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005303{
5304 struct binder_work *w;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005305 struct binder_thread *thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005306 struct rb_node *n;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005307 int count, strong, weak, ready_threads;
Todd Kjosb4827902017-05-25 15:52:17 -07005308 size_t free_async_space =
5309 binder_alloc_get_free_async_space(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005310
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005311 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005312 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005313 count = 0;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005314 ready_threads = 0;
Todd Kjosb4827902017-05-25 15:52:17 -07005315 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005316 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5317 count++;
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005318
5319 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5320 ready_threads++;
5321
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005322 seq_printf(m, " threads: %d\n", count);
5323 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005324 " ready threads %d\n"
5325 " free async space %zd\n", proc->requested_threads,
5326 proc->requested_threads_started, proc->max_threads,
Martijn Coenen22d64e4322017-06-02 11:15:44 -07005327 ready_threads,
Todd Kjosb4827902017-05-25 15:52:17 -07005328 free_async_space);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005329 count = 0;
5330 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5331 count++;
Todd Kjos425d23f2017-06-12 12:07:26 -07005332 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005333 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005334 count = 0;
5335 strong = 0;
5336 weak = 0;
Todd Kjos5346bf32016-10-20 16:43:34 -07005337 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005338 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5339 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5340 rb_node_desc);
5341 count++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07005342 strong += ref->data.strong;
5343 weak += ref->data.weak;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005344 }
Todd Kjos5346bf32016-10-20 16:43:34 -07005345 binder_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005346 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005347
Todd Kjosd325d372016-10-10 10:40:53 -07005348 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005349 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005350
5351 count = 0;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005352 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005353 list_for_each_entry(w, &proc->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005354 if (w->type == BINDER_WORK_TRANSACTION)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005355 count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005356 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07005357 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005358 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005359
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005360 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005361}
5362
5363
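/*
 * Implements /sys/kernel/debug/binder/state: dead nodes first (walked
 * with temporary refs so the dead-nodes spinlock can be dropped while
 * each node is printed), then full state for every proc.
 */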
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005364static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005365{
5366 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005367 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005368 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005369
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005370 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005371
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005372 spin_lock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005373 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005374 seq_puts(m, "dead nodes:\n");
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005375 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5376 /*
5377 * take a temporary reference on the node so it
5378 * survives and isn't removed from the list
5379 * while we print it.
5380 */
5381 node->tmp_refs++;
5382 spin_unlock(&binder_dead_nodes_lock);
5383 if (last_node)
5384 binder_put_node(last_node);
5385 binder_node_lock(node);
Todd Kjos425d23f2017-06-12 12:07:26 -07005386 print_binder_node_nilocked(m, node);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005387 binder_node_unlock(node);
5388 last_node = node;
5389 spin_lock(&binder_dead_nodes_lock);
5390 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005391 spin_unlock(&binder_dead_nodes_lock);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07005392 if (last_node)
5393 binder_put_node(last_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005394
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005395 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005396 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005397 print_binder_proc(m, proc, 1);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005398 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005399
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005400 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005401}
5402
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005403static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005404{
5405 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005406
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005407 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005408
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005409 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005410
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005411 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005412 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005413 print_binder_proc_stats(m, proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005414 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005415
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005416 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005417}
5418
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005419static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005420{
5421 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005422
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005423 seq_puts(m, "binder transactions:\n");
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005424 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005425 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005426 print_binder_proc(m, proc, 0);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005427 mutex_unlock(&binder_procs_lock);
Todd Kjos218b6972016-11-14 11:37:41 -08005428
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005429 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005430}
5431
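/*
 * Implements /sys/kernel/debug/binder/proc/<pid>.  A process may open
 * several binder devices, giving multiple binder_proc entries with the
 * same pid, so every match is printed rather than stopping at the
 * first.
 */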
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005432static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005433{
Riley Andrews83050a42016-02-09 21:05:33 -08005434 struct binder_proc *itr;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005435 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005436
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005437 mutex_lock(&binder_procs_lock);
Riley Andrews83050a42016-02-09 21:05:33 -08005438 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005439 if (itr->pid == pid) {
5440 seq_puts(m, "binder proc state:\n");
5441 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08005442 }
5443 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005444 mutex_unlock(&binder_procs_lock);
5445
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005446 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005447}
5448
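/*
 * Log entries are filled in lock-free by writers that bump
 * debug_id_done when they finish.  The paired smp_rmb()s below let us
 * detect a concurrent rewrite: if debug_id_done changed (or is still
 * zero) after printing, the entry is flagged "(incomplete)".
 */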
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005449static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005450 struct binder_transaction_log_entry *e)
5451{
Todd Kjos1cfe6272017-05-24 13:33:28 -07005452 int debug_id = READ_ONCE(e->debug_id_done);
5453 /*
 5454	 * read barrier to guarantee that debug_id_done is read
 5455	 * before we print the values of the log entry
5456 */
5457 smp_rmb();
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005458 seq_printf(m,
Todd Kjos1cfe6272017-05-24 13:33:28 -07005459 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005460 e->debug_id, (e->call_type == 2) ? "reply" :
5461 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02005462 e->from_thread, e->to_proc, e->to_thread, e->context_name,
Todd Kjose598d172017-03-22 17:19:52 -07005463 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5464 e->return_error, e->return_error_param,
5465 e->return_error_line);
Todd Kjos1cfe6272017-05-24 13:33:28 -07005466 /*
 5467	 * read barrier to guarantee debug_id_done is re-read only
 5468	 * after the fields of the entry have been printed
5469 */
5470 smp_rmb();
5471 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5472 "\n" : " (incomplete)\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005473}
5474
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005475static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005476{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005477 struct binder_transaction_log *log = m->private;
Todd Kjos1cfe6272017-05-24 13:33:28 -07005478 unsigned int log_cur = atomic_read(&log->cur);
5479 unsigned int count;
5480 unsigned int cur;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005481 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005482
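	/*
	 * log->cur counts entries ever logged (it starts at ~0U, so the
	 * first add lands on slot 0).  Once the ring has wrapped, start
	 * at the oldest slot and print all ARRAY_SIZE(log->entry)
	 * entries; otherwise print slots 0..log_cur in order.
	 */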
Todd Kjos1cfe6272017-05-24 13:33:28 -07005483 count = log_cur + 1;
5484 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5485 0 : count % ARRAY_SIZE(log->entry);
5486 if (count > ARRAY_SIZE(log->entry) || log->full)
5487 count = ARRAY_SIZE(log->entry);
5488 for (i = 0; i < count; i++) {
5489 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5490
5491 print_binder_transaction_log_entry(m, &log->entry[index]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005492 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005493 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005494}
5495
5496static const struct file_operations binder_fops = {
5497 .owner = THIS_MODULE,
5498 .poll = binder_poll,
5499 .unlocked_ioctl = binder_ioctl,
Arve Hjønnevågda498892014-02-21 14:40:26 -08005500 .compat_ioctl = binder_ioctl,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005501 .mmap = binder_mmap,
5502 .open = binder_open,
5503 .flush = binder_flush,
5504 .release = binder_release,
5505};
5506
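/*
 * BINDER_DEBUG_ENTRY() (defined near the top of this file) expands to
 * the single_open() boilerplate that turns each *_show() routine above
 * into a binder_*_fops, which the debugfs_create_file() calls in
 * binder_init() then wire up.
 */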
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005507BINDER_DEBUG_ENTRY(state);
5508BINDER_DEBUG_ENTRY(stats);
5509BINDER_DEBUG_ENTRY(transactions);
5510BINDER_DEBUG_ENTRY(transaction_log);
5511
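/*
 * Register one binder character device (dynamic misc minor).  Each
 * device carries its own binder_context, so e.g. "binder", "hwbinder"
 * and "vndbinder" stay fully isolated from one another.
 */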
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005512static int __init init_binder_device(const char *name)
5513{
5514 int ret;
5515 struct binder_device *binder_device;
5516
5517 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5518 if (!binder_device)
5519 return -ENOMEM;
5520
5521 binder_device->miscdev.fops = &binder_fops;
5522 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5523 binder_device->miscdev.name = name;
5524
5525 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5526 binder_device->context.name = name;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07005527 mutex_init(&binder_device->context.context_mgr_node_lock);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005528
5529 ret = misc_register(&binder_device->miscdev);
5530 if (ret < 0) {
5531 kfree(binder_device);
5532 return ret;
5533 }
5534
5535 hlist_add_head(&binder_device->hlist, &binder_devices);
5536
5537 return ret;
5538}
5539
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005540static int __init binder_init(void)
5541{
5542 int ret;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005543	char *device_name, *device_names, *device_tmp;
5544 struct binder_device *device;
5545 struct hlist_node *tmp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005546
Todd Kjos1cfe6272017-05-24 13:33:28 -07005547 atomic_set(&binder_transaction_log.cur, ~0U);
5548 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5549
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005550 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5551 if (binder_debugfs_dir_entry_root)
5552 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5553 binder_debugfs_dir_entry_root);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005554
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005555 if (binder_debugfs_dir_entry_root) {
5556 debugfs_create_file("state",
5557 S_IRUGO,
5558 binder_debugfs_dir_entry_root,
5559 NULL,
5560 &binder_state_fops);
5561 debugfs_create_file("stats",
5562 S_IRUGO,
5563 binder_debugfs_dir_entry_root,
5564 NULL,
5565 &binder_stats_fops);
5566 debugfs_create_file("transactions",
5567 S_IRUGO,
5568 binder_debugfs_dir_entry_root,
5569 NULL,
5570 &binder_transactions_fops);
5571 debugfs_create_file("transaction_log",
5572 S_IRUGO,
5573 binder_debugfs_dir_entry_root,
5574 &binder_transaction_log,
5575 &binder_transaction_log_fops);
5576 debugfs_create_file("failed_transaction_log",
5577 S_IRUGO,
5578 binder_debugfs_dir_entry_root,
5579 &binder_transaction_log_failed,
5580 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005581 }
Martijn Coenen6b7c7122016-09-30 16:08:09 +02005582
5583 /*
5584 * Copy the module_parameter string, because we don't want to
5585 * tokenize it in-place.
5586 */
5587 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5588 if (!device_names) {
5589 ret = -ENOMEM;
5590 goto err_alloc_device_names_failed;
5591 }
5592 strcpy(device_names, binder_devices_param);
5593
	/* keep device_names intact so the error path below can free it */
	device_tmp = device_names;
 5594	while ((device_name = strsep(&device_tmp, ","))) {
5595 ret = init_binder_device(device_name);
5596 if (ret)
5597 goto err_init_binder_device_failed;
5598 }
5599
5600 return ret;
5601
5602err_init_binder_device_failed:
5603 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5604 misc_deregister(&device->miscdev);
5605 hlist_del(&device->hlist);
5606 kfree(device);
 5607	}
	kfree(device_names);

5608err_alloc_device_names_failed:
5609 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5610
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005611 return ret;
5612}
5613
5614device_initcall(binder_init);
5615
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005616#define CREATE_TRACE_POINTS
5617#include "binder_trace.h"
5618
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005619MODULE_LICENSE("GPL v2");