/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo).
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

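/*
 * Editor's illustration (not part of the driver): the required nesting
 * when all three locks are taken for one proc/node pair, using the
 * helper wrappers defined later in this file. Assumes node belongs to
 * proc and both are valid.
 *
 *      binder_proc_lock(proc);         1) proc->outer_lock
 *      binder_node_lock(node);         2) node->lock
 *      binder_inner_proc_lock(proc);   3) proc->inner_lock
 *      ... touch refs, node state, todo lists ...
 *      binder_inner_proc_unlock(proc);
 *      binder_node_unlock(node);
 *      binder_proc_unlock(proc);
 *
 * Release order is the reverse of acquisition order.
 */
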
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
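
/*
 * For reference, BINDER_DEBUG_ENTRY(proc) above expands (roughly) to:
 *
 *      static int binder_proc_open(struct inode *inode, struct file *file)
 *      {
 *              return single_open(file, binder_proc_show, inode->i_private);
 *      }
 *
 *      static const struct file_operations binder_proc_fops = {
 *              .owner = THIS_MODULE,
 *              .open = binder_proc_open,
 *              .read = seq_read,
 *              .llseek = seq_lseek,
 *              .release = single_release,
 *      };
 */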

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
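
/*
 * Example usage (editor's illustration, modeled on calls made later in
 * this driver; proc/thread/fp are stand-ins for locals in scope):
 *
 *      binder_debug(BINDER_DEBUG_THREADS,
 *                   "%d:%d exit\n", proc->pid, thread->pid);
 *      binder_user_error("%d:%d got transaction with invalid handle, %d\n",
 *                        proc->pid, thread->pid, fp->handle);
 *
 * binder_user_error() additionally latches binder_stop_on_user_error
 * to 2 when the stop_on_user_error module parameter is set, so binder
 * threads can be held on binder_user_error_wait after the first bad
 * request.
 */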

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}
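
/*
 * Editor's note: object accounting is symmetric. Creation paths call
 * binder_stats_created() (e.g. BINDER_STAT_NODE in
 * binder_init_node_ilocked() below), teardown paths call
 * binder_stats_deleted() with the same type, and the debugfs stats
 * output reports the created/deleted counts per object type.
 */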

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = 1;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
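
/*
 * Editor's note (based on the debugfs log reader in this driver): the
 * smp_wmb() above pairs with a read barrier on the reader side, which
 * samples debug_id_done before and after printing an entry; an entry
 * whose debug_id_done is still 0 is known to be mid-update and can be
 * reported as incomplete rather than shown as stale data.
 */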

struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:     unique ID for the ref
 * @desc:         unique userspace handle for ref
 * @strong:       strong ref count (debugging only if not locked)
 * @weak:         weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES    = 0x01,
        BINDER_DEFERRED_FLUSH        = 0x02,
        BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 * @refs_by_node:         rbtree of refs ordered by ref->node
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 * @requested_threads_started: number of binder threads started
 * @ready_threads:        number of threads waiting for proc work
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        wait_queue_head_t wait;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int ready_threads;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */       /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        long priority;
        long saved_priority;
        kuid_t sender_euid;
        /**
         * @lock:  protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
                    struct binder_work *work,
                    struct list_head *target_list)
{
        binder_inner_proc_lock(proc);
        binder_enqueue_work_ilocked(work, target_list);
        binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        struct files_struct *files = proc->files;
        unsigned long rlim_cur;
        unsigned long irqs;

        if (files == NULL)
                return -ESRCH;

        if (!lock_task_sighand(proc->tsk, &irqs))
                return -EMFILE;

        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        if (proc->files)
                __fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        if (proc->files == NULL)
                return -ESRCH;

        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;

        return retval;
}

static inline void binder_lock(const char *tag)
{
        trace_binder_lock(tag);
        mutex_lock(&binder_main_lock);
        trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
        trace_binder_unlock(tag);
        mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                     current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
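
/*
 * Editor's worked example: rlimit_to_nice() maps an RLIMIT_NICE value
 * of n to a nice floor of 20 - n. With the default rlim_cur of 0 the
 * floor is 20 (> MAX_NICE), so the binder_user_error() above fires;
 * with rlim_cur = 25, an unprivileged caller requesting nice -10
 * would be capped to -5 instead.
 */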

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        BUG_ON(!spin_is_locked(&proc->inner_lock));

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}

static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        BUG_ON(!spin_is_locked(&proc->inner_lock));
        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}
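
/*
 * Editor's note: binder_new_node() shows the allocate-then-insert
 * pattern used throughout this rework: kzalloc(GFP_KERNEL) may sleep
 * and so cannot run under the inner spinlock. The node is allocated
 * first, the rb tree is searched under proc->inner_lock, and if
 * another thread inserted the same ptr first, the fresh allocation is
 * freed and the existing node (with a tmp ref already taken) is
 * returned.
 */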

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        BUG_ON(!spin_is_locked(&node->lock));
        if (proc)
                BUG_ON(!spin_is_locked(&proc->inner_lock));
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->
                                      binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        binder_dequeue_work_ilocked(&node->work);
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        BUG_ON(!spin_is_locked(&node->lock));
        if (proc)
                BUG_ON(!spin_is_locked(&proc->inner_lock));
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                    !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        wake_up_interruptible(&node->proc->wait);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in binder_dec_node().
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
                                         u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}

/**
 * binder_get_ref_for_node() - get the ref associated with given node
 * @proc:       binder_proc that owns the ref
 * @node:       binder_node of target
 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:      the ref for node. It is possible that another thread
 *              allocated/initialized the ref first in which case the
 *              returned ref would be different than the passed-in
 *              new_ref. new_ref must be kfree'd by the caller in
 *              this case.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
                                                  struct binder_node *node,
                                                  struct binder_ref *new_ref)
{
        struct binder_context *context = proc->context;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref;
        struct rb_node *n;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        if (!new_ref)
                return NULL;

        binder_stats_created(BINDER_STAT_REF);
        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->data.desc > new_ref->data.desc)
                        break;
                new_ref->data.desc = ref->data.desc + 1;
        }

        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->data.desc < ref->data.desc)
                        p = &(*p)->rb_left;
                else if (new_ref->data.desc > ref->data.desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

        binder_node_lock(node);
        hlist_add_head(&new_ref->node_entry, &node->refs);

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d new ref %d desc %d for node %d\n",
                     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
                     node->debug_id);
        binder_node_unlock(node);
        return new_ref;
}
1370
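/*
 * Illustrative sketch (not part of the driver): how the descriptor
 * loop above picks a value. Descriptor 0 is reserved for the context
 * manager; every other ref gets the lowest unused value >= 1. If a
 * proc already holds descriptors {0, 1, 2, 5}, a new ref is assigned
 * as follows:
 *
 *	desc = 1;		// not the context manager node
 *	// refs_by_desc in ascending order: 0, 1, 2, 5
 *	// 0: desc = 0 + 1 = 1;  1: desc = 2;  2: desc = 3
 *	// 5 > 3, stop: the new ref gets desc 3, the first hole
 */
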
Todd Kjosb0117bb2017-05-08 09:16:27 -07001371static void binder_cleanup_ref(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001372{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001373 bool delete_node = false;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001374
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001375 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301376 "%d delete ref %d desc %d for node %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001377 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301378 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001379
1380 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1381 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001382
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001383 binder_node_inner_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001384 if (ref->data.strong)
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001385 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001386
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001387 hlist_del(&ref->node_entry);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07001388 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1389 binder_node_inner_unlock(ref->node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07001390 /*
1391 * Clear ref->node unless we want the caller to free the node
1392 */
1393 if (!delete_node) {
1394 /*
1395 * The caller uses ref->node to determine
1396 * whether the node needs to be freed. Clear
1397 * it since the node is still alive.
1398 */
1399 ref->node = NULL;
1400 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001401
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001402 if (ref->death) {
1403 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301404 "%d delete ref %d desc %d has death notification\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001405 ref->proc->pid, ref->data.debug_id,
1406 ref->data.desc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001407 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001408 binder_stats_deleted(BINDER_STAT_DEATH);
1409 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001410 binder_stats_deleted(BINDER_STAT_REF);
1411}
1412
Todd Kjosb0117bb2017-05-08 09:16:27 -07001413/**
1414 * binder_inc_ref() - increment the given ref
1415 * @ref: ref to be incremented
1416 * @strong: if true, strong increment, else weak
1417 * @target_list: list to queue node work on
1418 *
1419 * Increment the ref.
1420 *
1421 * Return: 0, if successful, else errno
1422 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001423static int binder_inc_ref(struct binder_ref *ref, int strong,
1424 struct list_head *target_list)
1425{
1426 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001427
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001428 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001429 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001430 ret = binder_inc_node(ref->node, 1, 1, target_list);
1431 if (ret)
1432 return ret;
1433 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001434 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001435 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001436 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001437 ret = binder_inc_node(ref->node, 0, 1, target_list);
1438 if (ret)
1439 return ret;
1440 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001441 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001442 }
1443 return 0;
1444}
1445
Todd Kjosb0117bb2017-05-08 09:16:27 -07001446/**
1447 * binder_dec_ref() - decrement the given ref
1448 * @ref: ref to be decremented
1449 * @strong: if true, strong decrement, else weak
1450 *
1451 * Decrement the ref.
1452 *
1453 * TODO: kfree is avoided here since an upcoming patch
1454 * will put this under a lock.
1455 *
1456 * Return: true if ref is cleaned up and ready to be freed
1457 */
1458static bool binder_dec_ref(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001459{
1460 if (strong) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001461 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301462 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001463 ref->proc->pid, ref->data.debug_id,
1464 ref->data.desc, ref->data.strong,
1465 ref->data.weak);
1466 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001467 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001468 ref->data.strong--;
Todd Kjose7f23ed2017-03-21 13:06:01 -07001469 if (ref->data.strong == 0)
1470 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001471 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07001472 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301473 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07001474 ref->proc->pid, ref->data.debug_id,
1475 ref->data.desc, ref->data.strong,
1476 ref->data.weak);
1477 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001478 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001479 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001480 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07001481 if (ref->data.strong == 0 && ref->data.weak == 0) {
1482 binder_cleanup_ref(ref);
1483 /*
1484 * TODO: we could kfree(ref) here, but an upcoming
1485 * patch will call this with a lock held, so we
1486 * return an indication that the ref should be
1487 * freed.
1488 */
1489 return true;
1490 }
1491 return false;
1492}
1493
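/*
 * Illustrative sketch (not part of the driver): the calling
 * convention for binder_dec_ref(). Since kfree() is deferred to the
 * caller (see the TODO above), every caller must act on the return
 * value:
 *
 *	bool delete_ref = binder_dec_ref(ref, strong);
 *
 *	// ref is still valid here; read any data that is needed
 *	if (delete_ref)
 *		binder_free_ref(ref);
 *
 * binder_update_ref_for_handle() below follows exactly this pattern.
 */
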
1494/**
1495 * binder_get_node_from_ref() - get the node from the given proc/desc
1496 * @proc: proc containing the ref
1497 * @desc: the handle associated with the ref
1498 * @need_strong_ref: if true, only return node if ref is strong
1499 * @rdata: the id/refcount data for the ref
1500 *
1501 * Given a proc and ref handle, return the associated binder_node
1502 *
1503 * Return: a binder_node, or NULL if the ref is not found or is only weak when a strong ref is required
1504 */
1505static struct binder_node *binder_get_node_from_ref(
1506 struct binder_proc *proc,
1507 u32 desc, bool need_strong_ref,
1508 struct binder_ref_data *rdata)
1509{
1510 struct binder_node *node;
1511 struct binder_ref *ref;
1512
1513 ref = binder_get_ref(proc, desc, need_strong_ref);
1514 if (!ref)
1515 goto err_no_ref;
1516 node = ref->node;
Todd Kjosf22abc72017-05-09 11:08:05 -07001517 /*
1518 * Take an implicit reference on the node to ensure
1519 * it stays alive until the call to binder_put_node()
1520 */
1521 binder_inc_node_tmpref(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001522 if (rdata)
1523 *rdata = ref->data;
1524
1525 return node;
1526
1527err_no_ref:
1528 return NULL;
1529}
1530
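/*
 * Illustrative sketch (not part of the driver): the node returned by
 * binder_get_node_from_ref() carries a temporary reference, so every
 * successful lookup must be paired with a binder_put_node():
 *
 *	struct binder_ref_data rdata;
 *	struct binder_node *node;
 *
 *	node = binder_get_node_from_ref(proc, desc, true, &rdata);
 *	if (!node)
 *		return -EINVAL;
 *	// node cannot be freed here, even if its proc dies
 *	binder_put_node(node);
 *
 * binder_translate_handle() below uses this pattern.
 */
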
1531/**
1532 * binder_free_ref() - free the binder_ref
1533 * @ref: ref to free
1534 *
Todd Kjose7f23ed2017-03-21 13:06:01 -07001535 * Free the binder_ref. Free the binder_node indicated by ref->node
1536 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjosb0117bb2017-05-08 09:16:27 -07001537 */
1538static void binder_free_ref(struct binder_ref *ref)
1539{
Todd Kjose7f23ed2017-03-21 13:06:01 -07001540 if (ref->node)
1541 binder_free_node(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001542 kfree(ref->death);
1543 kfree(ref);
1544}
1545
1546/**
1547 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1548 * @proc: proc containing the ref
1549 * @desc: the handle associated with the ref
1550 * @increment: true=inc reference, false=dec reference
1551 * @strong: true=strong reference, false=weak reference
1552 * @rdata: the id/refcount data for the ref
1553 *
1554 * Given a proc and ref handle, increment or decrement the ref
1555 * according to "increment" arg.
1556 *
1557 * Return: 0 if successful, else errno
1558 */
1559static int binder_update_ref_for_handle(struct binder_proc *proc,
1560 uint32_t desc, bool increment, bool strong,
1561 struct binder_ref_data *rdata)
1562{
1563 int ret = 0;
1564 struct binder_ref *ref;
1565 bool delete_ref = false;
1566
1567 ref = binder_get_ref(proc, desc, strong);
1568 if (!ref) {
1569 ret = -EINVAL;
1570 goto err_no_ref;
1571 }
1572 if (increment)
1573 ret = binder_inc_ref(ref, strong, NULL);
1574 else
1575 delete_ref = binder_dec_ref(ref, strong);
1576
1577 if (rdata)
1578 *rdata = ref->data;
1579
1580 if (delete_ref)
1581 binder_free_ref(ref);
1582 return ret;
1583
1584err_no_ref:
1585 return ret;
1586}
1587
1588/**
1589 * binder_dec_ref_for_handle() - dec the ref for given handle
1590 * @proc: proc containing the ref
1591 * @desc: the handle associated with the ref
1592 * @strong: true=strong reference, false=weak reference
1593 * @rdata: the id/refcount data for the ref
1594 *
1595 * Just calls binder_update_ref_for_handle() to decrement the ref.
1596 *
1597 * Return: 0 if successful, else errno
1598 */
1599static int binder_dec_ref_for_handle(struct binder_proc *proc,
1600 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1601{
1602 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1603}
1604
1605
1606/**
1607 * binder_inc_ref_for_node() - increment the ref for given proc/node
1608 * @proc: proc containing the ref
1609 * @node: target node
1610 * @strong: true=strong reference, false=weak reference
1611 * @target_list: worklist to use if node is incremented
1612 * @rdata: the id/refcount data for the ref
1613 *
1614 * Given a proc and node, increment the ref. Create the ref if it
1615 * doesn't already exist
1616 *
1617 * Return: 0 if successful, else errno
1618 */
1619static int binder_inc_ref_for_node(struct binder_proc *proc,
1620 struct binder_node *node,
1621 bool strong,
1622 struct list_head *target_list,
1623 struct binder_ref_data *rdata)
1624{
1625 struct binder_ref *ref;
1626 struct binder_ref *new_ref = NULL;
1627 int ret = 0;
1628
1629 ref = binder_get_ref_for_node(proc, node, NULL);
1630 if (!ref) {
1631 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1632 if (!new_ref)
1633 return -ENOMEM;
1634 ref = binder_get_ref_for_node(proc, node, new_ref);
1635 }
1636 ret = binder_inc_ref(ref, strong, target_list);
1637 *rdata = ref->data;
1638 if (new_ref && ref != new_ref)
1639 /*
1640 * Another thread created the ref first so
1641 * free the one we allocated
1642 */
1643 kfree(new_ref);
1644 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001645}
1646
1647static void binder_pop_transaction(struct binder_thread *target_thread,
1648 struct binder_transaction *t)
1649{
Todd Kjos21ef40a2017-03-30 18:02:13 -07001650 BUG_ON(!target_thread);
1651 BUG_ON(target_thread->transaction_stack != t);
1652 BUG_ON(target_thread->transaction_stack->from != target_thread);
1653 target_thread->transaction_stack =
1654 target_thread->transaction_stack->from_parent;
1655 t->from = NULL;
1656}
1657
Todd Kjos2f993e22017-05-12 14:42:55 -07001658/**
1659 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1660 * @thread: thread to decrement
1661 *
1662 * A thread needs to be kept alive while being used to create or
1663 * handle a transaction. binder_get_txn_from() is used to safely
1664 * extract t->from from a binder_transaction and keep the thread
1665 * indicated by t->from from being freed. When done with that
1666 * binder_thread, this function is called to decrement the
1667 * tmp_ref and free if appropriate (thread has been released
1668 * and no transaction being processed by the driver)
1669 */
1670static void binder_thread_dec_tmpref(struct binder_thread *thread)
1671{
1672 /*
1673 * atomic ops protect the counter, which can be updated
1674 * concurrently until the thread is dead and the count reaches zero
Todd Kjos2f993e22017-05-12 14:42:55 -07001675 */
Todd Kjosb4827902017-05-25 15:52:17 -07001676 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001677 atomic_dec(&thread->tmp_ref);
1678 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjosb4827902017-05-25 15:52:17 -07001679 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001680 binder_free_thread(thread);
1681 return;
1682 }
Todd Kjosb4827902017-05-25 15:52:17 -07001683 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001684}
1685
1686/**
1687 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1688 * @proc: proc to decrement
1689 *
1690 * A binder_proc needs to be kept alive while being used to create or
1691 * handle a transaction. proc->tmp_ref is incremented when
1692 * creating a new transaction or the binder_proc is currently in-use
1693 * by threads that are being released. When done with the binder_proc,
1694 * this function is called to decrement the counter and free the
1695 * proc if appropriate (proc has been released, all threads have
1696 * been released and not currently in use to process a transaction).
1697 */
1698static void binder_proc_dec_tmpref(struct binder_proc *proc)
1699{
Todd Kjosb4827902017-05-25 15:52:17 -07001700 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001701 proc->tmp_ref--;
1702 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1703 !proc->tmp_ref) {
Todd Kjosb4827902017-05-25 15:52:17 -07001704 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001705 binder_free_proc(proc);
1706 return;
1707 }
Todd Kjosb4827902017-05-25 15:52:17 -07001708 binder_inner_proc_unlock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07001709}
1710
1711/**
1712 * binder_get_txn_from() - safely extract the "from" thread in transaction
1713 * @t: binder transaction for t->from
1714 *
1715 * Atomically return the "from" thread and increment the tmp_ref
1716 * count for the thread to ensure it stays alive until
1717 * binder_thread_dec_tmpref() is called.
1718 *
1719 * Return: the value of t->from
1720 */
1721static struct binder_thread *binder_get_txn_from(
1722 struct binder_transaction *t)
1723{
1724 struct binder_thread *from;
1725
1726 spin_lock(&t->lock);
1727 from = t->from;
1728 if (from)
1729 atomic_inc(&from->tmp_ref);
1730 spin_unlock(&t->lock);
1731 return from;
1732}
1733
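/*
 * Illustrative sketch (not part of the driver):
 * binder_get_txn_from() and binder_thread_dec_tmpref() are used as a
 * pair, so that t->from cannot be freed out from under the caller:
 *
 *	struct binder_thread *from;
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		// from is guaranteed to stay alive here
 *		binder_thread_dec_tmpref(from);
 *	}
 *
 * binder_send_failed_reply() below is one such caller.
 */
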
Todd Kjos21ef40a2017-03-30 18:02:13 -07001734static void binder_free_transaction(struct binder_transaction *t)
1735{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001736 if (t->buffer)
1737 t->buffer->transaction = NULL;
1738 kfree(t);
1739 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1740}
1741
1742static void binder_send_failed_reply(struct binder_transaction *t,
1743 uint32_t error_code)
1744{
1745 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001746 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09001747
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001748 BUG_ON(t->flags & TF_ONE_WAY);
1749 while (1) {
Todd Kjos2f993e22017-05-12 14:42:55 -07001750 target_thread = binder_get_txn_from(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001751 if (target_thread) {
Todd Kjos858b8da2017-04-21 17:35:12 -07001752 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1753 "send failed reply for transaction %d to %d:%d\n",
1754 t->debug_id,
1755 target_thread->proc->pid,
1756 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001757
Todd Kjos858b8da2017-04-21 17:35:12 -07001758 binder_pop_transaction(target_thread, t);
1759 if (target_thread->reply_error.cmd == BR_OK) {
1760 target_thread->reply_error.cmd = error_code;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07001761 binder_enqueue_work(
1762 target_thread->proc,
1763 &target_thread->reply_error.work,
Todd Kjos858b8da2017-04-21 17:35:12 -07001764 &target_thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001765 wake_up_interruptible(&target_thread->wait);
1766 } else {
Todd Kjos858b8da2017-04-21 17:35:12 -07001767 WARN(1, "Unexpected reply error: %u\n",
1768 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001769 }
Todd Kjos2f993e22017-05-12 14:42:55 -07001770 binder_thread_dec_tmpref(target_thread);
Todd Kjos858b8da2017-04-21 17:35:12 -07001771 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001772 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001773 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001774 next = t->from_parent;
1775
1776 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1777 "send failed reply for transaction %d, target dead\n",
1778 t->debug_id);
1779
Todd Kjos21ef40a2017-03-30 18:02:13 -07001780 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03001781 if (next == NULL) {
1782 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1783 "reply failed, no target thread at root\n");
1784 return;
1785 }
1786 t = next;
1787 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1788 "reply failed, no target thread -- retry %d\n",
1789 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001790 }
1791}
1792
Martijn Coenen00c80372016-07-13 12:06:49 +02001793/**
1794 * binder_validate_object() - checks for a valid metadata object in a buffer.
1795 * @buffer: binder_buffer that we're parsing.
1796 * @offset: offset in the buffer at which to validate an object.
1797 *
1798 * Return: If there's a valid metadata object at @offset in @buffer, the
1799 * size of that object. Otherwise, it returns zero.
1800 */
1801static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1802{
1803 /* Check if we can read a header first */
1804 struct binder_object_header *hdr;
1805 size_t object_size = 0;
1806
1807 if (offset > buffer->data_size - sizeof(*hdr) ||
1808 buffer->data_size < sizeof(*hdr) ||
1809 !IS_ALIGNED(offset, sizeof(u32)))
1810 return 0;
1811
1812 /* Ok, now see if we can read a complete object. */
1813 hdr = (struct binder_object_header *)(buffer->data + offset);
1814 switch (hdr->type) {
1815 case BINDER_TYPE_BINDER:
1816 case BINDER_TYPE_WEAK_BINDER:
1817 case BINDER_TYPE_HANDLE:
1818 case BINDER_TYPE_WEAK_HANDLE:
1819 object_size = sizeof(struct flat_binder_object);
1820 break;
1821 case BINDER_TYPE_FD:
1822 object_size = sizeof(struct binder_fd_object);
1823 break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02001824 case BINDER_TYPE_PTR:
1825 object_size = sizeof(struct binder_buffer_object);
1826 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02001827 case BINDER_TYPE_FDA:
1828 object_size = sizeof(struct binder_fd_array_object);
1829 break;
Martijn Coenen00c80372016-07-13 12:06:49 +02001830 default:
1831 return 0;
1832 }
1833 if (offset <= buffer->data_size - object_size &&
1834 buffer->data_size >= object_size)
1835 return object_size;
1836 else
1837 return 0;
1838}
1839
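/*
 * Illustrative sketch (not part of the driver): the canonical way
 * the driver walks a transaction buffer, validating each offset
 * before touching the object behind it:
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t object_size =
 *			binder_validate_object(buffer, *offp);
 *
 *		if (object_size == 0)
 *			continue;	// or fail the transaction
 *		hdr = (struct binder_object_header *)
 *			(buffer->data + *offp);
 *		switch (hdr->type) {
 *		...
 *		}
 *	}
 *
 * Both binder_transaction_buffer_release() and binder_transaction()
 * below follow this pattern.
 */
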
Martijn Coenen5a6da532016-09-30 14:10:07 +02001840/**
1841 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1842 * @b: binder_buffer containing the object
1843 * @index: index in offset array at which the binder_buffer_object is
1844 * located
1845 * @start: points to the start of the offset array
1846 * @num_valid: the number of valid offsets in the offset array
1847 *
1848 * Return: If @index is within the valid range of the offset array
1849 * described by @start and @num_valid, and if there's a valid
1850 * binder_buffer_object at the offset found in index @index
1851 * of the offset array, that object is returned. Otherwise,
1852 * %NULL is returned.
1853 * Note that the offset found at index @index itself is not
1854 * verified; this function assumes that @num_valid elements
1855 * from @start were previously verified to have valid offsets.
1856 */
1857static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1858 binder_size_t index,
1859 binder_size_t *start,
1860 binder_size_t num_valid)
1861{
1862 struct binder_buffer_object *buffer_obj;
1863 binder_size_t *offp;
1864
1865 if (index >= num_valid)
1866 return NULL;
1867
1868 offp = start + index;
1869 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1870 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1871 return NULL;
1872
1873 return buffer_obj;
1874}
1875
1876/**
1877 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1878 * @b: transaction buffer
1879 * @objects_start: start of objects buffer
1880 * @buffer: binder_buffer_object in which to do the fixup
1881 * @fixup_offset: start offset in @buffer to fix up
1882 * @last_obj: last binder_buffer_object that we fixed up in
1883 * @last_min_offset: minimum fixup offset in @last_obj
1884 *
1885 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is
1886 * allowed.
1887 *
1888 * For safety reasons, we only allow fixups inside a buffer to happen
1889 * at increasing offsets; additionally, we only allow fixup on the last
1890 * buffer object that was verified, or one of its parents.
1891 *
1892 * Example of what is allowed:
1893 *
1894 * A
1895 * B (parent = A, offset = 0)
1896 * C (parent = A, offset = 16)
1897 * D (parent = C, offset = 0)
1898 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1899 *
1900 * Examples of what is not allowed:
1901 *
1902 * Decreasing offsets within the same parent:
1903 * A
1904 * C (parent = A, offset = 16)
1905 * B (parent = A, offset = 0) // decreasing offset within A
1906 *
1907 * Referring to a parent that wasn't the last object or any of its parents:
1908 * A
1909 * B (parent = A, offset = 0)
1910 * C (parent = A, offset = 0)
1911 * C (parent = A, offset = 16)
1912 * D (parent = B, offset = 0) // B is not A or any of A's parents
1913 */
1914static bool binder_validate_fixup(struct binder_buffer *b,
1915 binder_size_t *objects_start,
1916 struct binder_buffer_object *buffer,
1917 binder_size_t fixup_offset,
1918 struct binder_buffer_object *last_obj,
1919 binder_size_t last_min_offset)
1920{
1921 if (!last_obj) {
1922 /* No previous object, so there is nothing to fix up in */
1923 return false;
1924 }
1925
1926 while (last_obj != buffer) {
1927 /*
1928 * Safe to retrieve the parent of last_obj, since it
1929 * was already previously verified by the driver.
1930 */
1931 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1932 return false;
1933 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1934 last_obj = (struct binder_buffer_object *)
1935 (b->data + *(objects_start + last_obj->parent));
1936 }
1937 return (fixup_offset >= last_min_offset);
1938}
1939
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001940static void binder_transaction_buffer_release(struct binder_proc *proc,
1941 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001942 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001943{
Martijn Coenen5a6da532016-09-30 14:10:07 +02001944 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001945 int debug_id = buffer->debug_id;
1946
1947 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301948 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001949 proc->pid, buffer->debug_id,
1950 buffer->data_size, buffer->offsets_size, failed_at);
1951
1952 if (buffer->target_node)
1953 binder_dec_node(buffer->target_node, 1, 0);
1954
Martijn Coenen5a6da532016-09-30 14:10:07 +02001955 off_start = (binder_size_t *)(buffer->data +
1956 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001957 if (failed_at)
1958 off_end = failed_at;
1959 else
Martijn Coenen5a6da532016-09-30 14:10:07 +02001960 off_end = (void *)off_start + buffer->offsets_size;
1961 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02001962 struct binder_object_header *hdr;
1963 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09001964
Martijn Coenen00c80372016-07-13 12:06:49 +02001965 if (object_size == 0) {
1966 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08001967 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001968 continue;
1969 }
Martijn Coenen00c80372016-07-13 12:06:49 +02001970 hdr = (struct binder_object_header *)(buffer->data + *offp);
1971 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001972 case BINDER_TYPE_BINDER:
1973 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02001974 struct flat_binder_object *fp;
1975 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09001976
Martijn Coenen00c80372016-07-13 12:06:49 +02001977 fp = to_flat_binder_object(hdr);
1978 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001979 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001980 pr_err("transaction release %d bad node %016llx\n",
1981 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001982 break;
1983 }
1984 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001985 " node %d u%016llx\n",
1986 node->debug_id, (u64)node->ptr);
Martijn Coenen00c80372016-07-13 12:06:49 +02001987 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1988 0);
Todd Kjosf22abc72017-05-09 11:08:05 -07001989 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001990 } break;
1991 case BINDER_TYPE_HANDLE:
1992 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02001993 struct flat_binder_object *fp;
Todd Kjosb0117bb2017-05-08 09:16:27 -07001994 struct binder_ref_data rdata;
1995 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001996
Martijn Coenen00c80372016-07-13 12:06:49 +02001997 fp = to_flat_binder_object(hdr);
Todd Kjosb0117bb2017-05-08 09:16:27 -07001998 ret = binder_dec_ref_for_handle(proc, fp->handle,
1999 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2000
2001 if (ret) {
2002 pr_err("transaction release %d bad handle %d, ret = %d\n",
2003 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002004 break;
2005 }
2006 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002007 " ref %d desc %d\n",
2008 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002009 } break;
2010
Martijn Coenen00c80372016-07-13 12:06:49 +02002011 case BINDER_TYPE_FD: {
2012 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2013
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002014 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen00c80372016-07-13 12:06:49 +02002015 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002016 if (failed_at)
Martijn Coenen00c80372016-07-13 12:06:49 +02002017 task_close_fd(proc, fp->fd);
2018 } break;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002019 case BINDER_TYPE_PTR:
2020 /*
2021 * Nothing to do here, this will get cleaned up when the
2022 * transaction buffer gets freed
2023 */
2024 break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002025 case BINDER_TYPE_FDA: {
2026 struct binder_fd_array_object *fda;
2027 struct binder_buffer_object *parent;
2028 uintptr_t parent_buffer;
2029 u32 *fd_array;
2030 size_t fd_index;
2031 binder_size_t fd_buf_size;
2032
2033 fda = to_binder_fd_array_object(hdr);
2034 parent = binder_validate_ptr(buffer, fda->parent,
2035 off_start,
2036 offp - off_start);
2037 if (!parent) {
2038 pr_err("transaction release %d bad parent offset",
2039 debug_id);
2040 continue;
2041 }
2042 /*
2043 * Since the parent was already fixed up, convert it
2044 * back to kernel address space to access it
2045 */
2046 parent_buffer = parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002047 binder_alloc_get_user_buffer_offset(
2048 &proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002049
2050 fd_buf_size = sizeof(u32) * fda->num_fds;
2051 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2052 pr_err("transaction release %d invalid number of fds (%lld)\n",
2053 debug_id, (u64)fda->num_fds);
2054 continue;
2055 }
2056 if (fd_buf_size > parent->length ||
2057 fda->parent_offset > parent->length - fd_buf_size) {
2058 /* No space for all file descriptors here. */
2059 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2060 debug_id, (u64)fda->num_fds);
2061 continue;
2062 }
2063 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2064 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2065 task_close_fd(proc, fd_array[fd_index]);
2066 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002067 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002068 pr_err("transaction release %d bad object type %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002069 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002070 break;
2071 }
2072 }
2073}
2074
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002075static int binder_translate_binder(struct flat_binder_object *fp,
2076 struct binder_transaction *t,
2077 struct binder_thread *thread)
2078{
2079 struct binder_node *node;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002080 struct binder_proc *proc = thread->proc;
2081 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002082 struct binder_ref_data rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002083 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002084
2085 node = binder_get_node(proc, fp->binder);
2086 if (!node) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002087 node = binder_new_node(proc, fp);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002088 if (!node)
2089 return -ENOMEM;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002090 }
2091 if (fp->cookie != node->cookie) {
2092 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2093 proc->pid, thread->pid, (u64)fp->binder,
2094 node->debug_id, (u64)fp->cookie,
2095 (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07002096 ret = -EINVAL;
2097 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002098 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002099 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2100 ret = -EPERM;
2101 goto done;
2102 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002103
Todd Kjosb0117bb2017-05-08 09:16:27 -07002104 ret = binder_inc_ref_for_node(target_proc, node,
2105 fp->hdr.type == BINDER_TYPE_BINDER,
2106 &thread->todo, &rdata);
2107 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002108 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002109
2110 if (fp->hdr.type == BINDER_TYPE_BINDER)
2111 fp->hdr.type = BINDER_TYPE_HANDLE;
2112 else
2113 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2114 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002115 fp->handle = rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002116 fp->cookie = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002117
Todd Kjosb0117bb2017-05-08 09:16:27 -07002118 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002119 binder_debug(BINDER_DEBUG_TRANSACTION,
2120 " node %d u%016llx -> ref %d desc %d\n",
2121 node->debug_id, (u64)node->ptr,
Todd Kjosb0117bb2017-05-08 09:16:27 -07002122 rdata.debug_id, rdata.desc);
Todd Kjosf22abc72017-05-09 11:08:05 -07002123done:
2124 binder_put_node(node);
2125 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002126}
2127
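/*
 * Illustrative sketch (not part of the driver): the effect of the
 * translation above as an object crosses process boundaries. The
 * sender names its node by a process-local pointer; the receiver is
 * handed a descriptor into its own ref table instead:
 *
 *	sender:   hdr.type = BINDER_TYPE_BINDER,
 *	          binder = <sender-local pointer>, cookie = <cookie>
 *	receiver: hdr.type = BINDER_TYPE_HANDLE,
 *	          handle = rdata.desc, binder = 0, cookie = 0
 *
 * The raw pointer never leaves the sending process; only the
 * descriptor is visible to the receiver.
 */
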
2128static int binder_translate_handle(struct flat_binder_object *fp,
2129 struct binder_transaction *t,
2130 struct binder_thread *thread)
2131{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002132 struct binder_proc *proc = thread->proc;
2133 struct binder_proc *target_proc = t->to_proc;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002134 struct binder_node *node;
2135 struct binder_ref_data src_rdata;
Todd Kjosf22abc72017-05-09 11:08:05 -07002136 int ret = 0;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002137
Todd Kjosb0117bb2017-05-08 09:16:27 -07002138 node = binder_get_node_from_ref(proc, fp->handle,
2139 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2140 if (!node) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002141 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2142 proc->pid, thread->pid, fp->handle);
2143 return -EINVAL;
2144 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002145 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2146 ret = -EPERM;
2147 goto done;
2148 }
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002149
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002150 binder_node_lock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002151 if (node->proc == target_proc) {
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002152 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2153 fp->hdr.type = BINDER_TYPE_BINDER;
2154 else
2155 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002156 fp->binder = node->ptr;
2157 fp->cookie = node->cookie;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002158 if (node->proc)
2159 binder_inner_proc_lock(node->proc);
2160 binder_inc_node_nilocked(node,
2161 fp->hdr.type == BINDER_TYPE_BINDER,
2162 0, NULL);
2163 if (node->proc)
2164 binder_inner_proc_unlock(node->proc);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002165 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002166 binder_debug(BINDER_DEBUG_TRANSACTION,
2167 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002168 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2169 (u64)node->ptr);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002170 binder_node_unlock(node);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002171 } else {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002172 int ret;
2173 struct binder_ref_data dest_rdata;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002174
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002175 binder_node_unlock(node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002176 ret = binder_inc_ref_for_node(target_proc, node,
2177 fp->hdr.type == BINDER_TYPE_HANDLE,
2178 NULL, &dest_rdata);
2179 if (ret)
Todd Kjosf22abc72017-05-09 11:08:05 -07002180 goto done;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002181
2182 fp->binder = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002183 fp->handle = dest_rdata.desc;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002184 fp->cookie = 0;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002185 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2186 &dest_rdata);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002187 binder_debug(BINDER_DEBUG_TRANSACTION,
2188 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjosb0117bb2017-05-08 09:16:27 -07002189 src_rdata.debug_id, src_rdata.desc,
2190 dest_rdata.debug_id, dest_rdata.desc,
2191 node->debug_id);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002192 }
Todd Kjosf22abc72017-05-09 11:08:05 -07002193done:
2194 binder_put_node(node);
2195 return ret;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002196}
2197
2198static int binder_translate_fd(int fd,
2199 struct binder_transaction *t,
2200 struct binder_thread *thread,
2201 struct binder_transaction *in_reply_to)
2202{
2203 struct binder_proc *proc = thread->proc;
2204 struct binder_proc *target_proc = t->to_proc;
2205 int target_fd;
2206 struct file *file;
2207 int ret;
2208 bool target_allows_fd;
2209
2210 if (in_reply_to)
2211 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2212 else
2213 target_allows_fd = t->buffer->target_node->accept_fds;
2214 if (!target_allows_fd) {
2215 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2216 proc->pid, thread->pid,
2217 in_reply_to ? "reply" : "transaction",
2218 fd);
2219 ret = -EPERM;
2220 goto err_fd_not_accepted;
2221 }
2222
2223 file = fget(fd);
2224 if (!file) {
2225 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2226 proc->pid, thread->pid, fd);
2227 ret = -EBADF;
2228 goto err_fget;
2229 }
2230 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2231 if (ret < 0) {
2232 ret = -EPERM;
2233 goto err_security;
2234 }
2235
2236 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2237 if (target_fd < 0) {
2238 ret = -ENOMEM;
2239 goto err_get_unused_fd;
2240 }
2241 task_fd_install(target_proc, target_fd, file);
2242 trace_binder_transaction_fd(t, fd, target_fd);
2243 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2244 fd, target_fd);
2245
2246 return target_fd;
2247
2248err_get_unused_fd:
2249err_security:
2250 fput(file);
2251err_fget:
2252err_fd_not_accepted:
2253 return ret;
2254}
2255
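/*
 * Illustrative sketch (not part of the driver): an fd is only
 * meaningful inside one process's file table, so the file behind the
 * sender's fd must be installed under a fresh fd in the target:
 *
 *	target_fd = binder_translate_fd(fd, t, thread, in_reply_to);
 *	if (target_fd < 0)
 *		return target_fd;	// -EPERM, -EBADF or -ENOMEM
 *	fd_array[fdi] = target_fd;	// patch the buffer in place
 *
 * binder_translate_fd_array() below does exactly this for every
 * element of a BINDER_TYPE_FDA object.
 */
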
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002256static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2257 struct binder_buffer_object *parent,
2258 struct binder_transaction *t,
2259 struct binder_thread *thread,
2260 struct binder_transaction *in_reply_to)
2261{
2262 binder_size_t fdi, fd_buf_size, num_installed_fds;
2263 int target_fd;
2264 uintptr_t parent_buffer;
2265 u32 *fd_array;
2266 struct binder_proc *proc = thread->proc;
2267 struct binder_proc *target_proc = t->to_proc;
2268
2269 fd_buf_size = sizeof(u32) * fda->num_fds;
2270 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2271 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2272 proc->pid, thread->pid, (u64)fda->num_fds);
2273 return -EINVAL;
2274 }
2275 if (fd_buf_size > parent->length ||
2276 fda->parent_offset > parent->length - fd_buf_size) {
2277 /* No space for all file descriptors here. */
2278 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2279 proc->pid, thread->pid, (u64)fda->num_fds);
2280 return -EINVAL;
2281 }
2282 /*
2283 * Since the parent was already fixed up, convert it
2284 * back to the kernel address space to access it
2285 */
Todd Kjosd325d372016-10-10 10:40:53 -07002286 parent_buffer = parent->buffer -
2287 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002288 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
2289 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2290 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2291 proc->pid, thread->pid);
2292 return -EINVAL;
2293 }
2294 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2295 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2296 in_reply_to);
2297 if (target_fd < 0)
2298 goto err_translate_fd_failed;
2299 fd_array[fdi] = target_fd;
2300 }
2301 return 0;
2302
2303err_translate_fd_failed:
2304 /*
2305 * Failed to allocate fd or security error, free fds
2306 * installed so far.
2307 */
2308 num_installed_fds = fdi;
2309 for (fdi = 0; fdi < num_installed_fds; fdi++)
2310 task_close_fd(target_proc, fd_array[fdi]);
2311 return target_fd;
2312}
2313
Martijn Coenen5a6da532016-09-30 14:10:07 +02002314static int binder_fixup_parent(struct binder_transaction *t,
2315 struct binder_thread *thread,
2316 struct binder_buffer_object *bp,
2317 binder_size_t *off_start,
2318 binder_size_t num_valid,
2319 struct binder_buffer_object *last_fixup_obj,
2320 binder_size_t last_fixup_min_off)
2321{
2322 struct binder_buffer_object *parent;
2323 u8 *parent_buffer;
2324 struct binder_buffer *b = t->buffer;
2325 struct binder_proc *proc = thread->proc;
2326 struct binder_proc *target_proc = t->to_proc;
2327
2328 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2329 return 0;
2330
2331 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2332 if (!parent) {
2333 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2334 proc->pid, thread->pid);
2335 return -EINVAL;
2336 }
2337
2338 if (!binder_validate_fixup(b, off_start,
2339 parent, bp->parent_offset,
2340 last_fixup_obj,
2341 last_fixup_min_off)) {
2342 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2343 proc->pid, thread->pid);
2344 return -EINVAL;
2345 }
2346
2347 if (parent->length < sizeof(binder_uintptr_t) ||
2348 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2349 /* No space for a pointer here! */
2350 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2351 proc->pid, thread->pid);
2352 return -EINVAL;
2353 }
2354 parent_buffer = (u8 *)(parent->buffer -
Todd Kjosd325d372016-10-10 10:40:53 -07002355 binder_alloc_get_user_buffer_offset(
2356 &target_proc->alloc));
Martijn Coenen5a6da532016-09-30 14:10:07 +02002357 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2358
2359 return 0;
2360}
2361
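/*
 * Illustrative sketch (not part of the driver): layout of a
 * transaction buffer as assembled by binder_transaction() below.
 * Offsets and scatter-gather buffers follow the payload, each
 * aligned to a pointer boundary:
 *
 *	buffer->data
 *	+---------------------------+
 *	| payload (data_size)       |
 *	+---------------------------+ <- off_start (pointer-aligned)
 *	| offsets (offsets_size)    |
 *	+---------------------------+ <- sg_bufp (pointer-aligned)
 *	| sg buffers                |
 *	| (extra_buffers_size)      |
 *	+---------------------------+ <- sg_buf_end
 */
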
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002362static void binder_transaction(struct binder_proc *proc,
2363 struct binder_thread *thread,
Martijn Coenen59878d72016-09-30 14:05:40 +02002364 struct binder_transaction_data *tr, int reply,
2365 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002366{
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002367 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002368 struct binder_transaction *t;
2369 struct binder_work *tcomplete;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002370 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002371 binder_size_t off_min;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002372 u8 *sg_bufp, *sg_buf_end;
Todd Kjos2f993e22017-05-12 14:42:55 -07002373 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002374 struct binder_thread *target_thread = NULL;
2375 struct binder_node *target_node = NULL;
2376 struct list_head *target_list;
2377 wait_queue_head_t *target_wait;
2378 struct binder_transaction *in_reply_to = NULL;
2379 struct binder_transaction_log_entry *e;
Todd Kjose598d172017-03-22 17:19:52 -07002380 uint32_t return_error = 0;
2381 uint32_t return_error_param = 0;
2382 uint32_t return_error_line = 0;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002383 struct binder_buffer_object *last_fixup_obj = NULL;
2384 binder_size_t last_fixup_min_off = 0;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002385 struct binder_context *context = proc->context;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002386 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002387
2388 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002389 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002390 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2391 e->from_proc = proc->pid;
2392 e->from_thread = thread->pid;
2393 e->target_handle = tr->target.handle;
2394 e->data_size = tr->data_size;
2395 e->offsets_size = tr->offsets_size;
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02002396 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002397
2398 if (reply) {
2399 in_reply_to = thread->transaction_stack;
2400 if (in_reply_to == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302401 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002402 proc->pid, thread->pid);
2403 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002404 return_error_param = -EPROTO;
2405 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002406 goto err_empty_call_stack;
2407 }
2408 binder_set_nice(in_reply_to->saved_priority);
2409 if (in_reply_to->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002410 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302411 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002412 proc->pid, thread->pid, in_reply_to->debug_id,
2413 in_reply_to->to_proc ?
2414 in_reply_to->to_proc->pid : 0,
2415 in_reply_to->to_thread ?
2416 in_reply_to->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002417 spin_unlock(&in_reply_to->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002418 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002419 return_error_param = -EPROTO;
2420 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002421 in_reply_to = NULL;
2422 goto err_bad_call_stack;
2423 }
2424 thread->transaction_stack = in_reply_to->to_parent;
Todd Kjos2f993e22017-05-12 14:42:55 -07002425 target_thread = binder_get_txn_from(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002426 if (target_thread == NULL) {
2427 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002428 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002429 goto err_dead_binder;
2430 }
2431 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302432 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002433 proc->pid, thread->pid,
2434 target_thread->transaction_stack ?
2435 target_thread->transaction_stack->debug_id : 0,
2436 in_reply_to->debug_id);
2437 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002438 return_error_param = -EPROTO;
2439 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002440 in_reply_to = NULL;
2441 target_thread = NULL;
2442 goto err_dead_binder;
2443 }
2444 target_proc = target_thread->proc;
Todd Kjos2f993e22017-05-12 14:42:55 -07002445 target_proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002446 } else {
2447 if (tr->target.handle) {
2448 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09002449
Todd Kjosc37162d2017-05-26 11:56:29 -07002450 /*
2451 * A valid target handle must already
2452 * hold a strong ref on this node; if
2453 * the ref is found, do a strong increment
2454 * on the node to ensure it stays alive
2455 * until the transaction is done.
2456 */
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002457 ref = binder_get_ref(proc, tr->target.handle, true);
Todd Kjosc37162d2017-05-26 11:56:29 -07002458 if (ref) {
2459 binder_inc_node(ref->node, 1, 0, NULL);
2460 target_node = ref->node;
2461 }
2462 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302463 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002464 proc->pid, thread->pid);
2465 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002466 return_error_param = -EINVAL;
2467 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002468 goto err_invalid_target_handle;
2469 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002470 } else {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002471 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002472 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002473 if (target_node == NULL) {
2474 return_error = BR_DEAD_REPLY;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002475 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjose598d172017-03-22 17:19:52 -07002476 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002477 goto err_no_context_mgr_node;
2478 }
Todd Kjosc37162d2017-05-26 11:56:29 -07002479 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002480 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002481 }
2482 e->to_node = target_node->debug_id;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002483 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002484 target_proc = target_node->proc;
2485 if (target_proc == NULL) {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002486 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002487 return_error = BR_DEAD_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002488 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002489 goto err_dead_binder;
2490 }
Todd Kjosb4827902017-05-25 15:52:17 -07002491 binder_inner_proc_lock(target_proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07002492 target_proc->tmp_ref++;
Todd Kjosb4827902017-05-25 15:52:17 -07002493 binder_inner_proc_unlock(target_proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002494 binder_node_unlock(target_node);
Stephen Smalley79af7302015-01-21 10:54:10 -05002495 if (security_binder_transaction(proc->tsk,
2496 target_proc->tsk) < 0) {
2497 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002498 return_error_param = -EPERM;
2499 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05002500 goto err_invalid_target_handle;
2501 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002502 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2503 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002504
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002505 tmp = thread->transaction_stack;
2506 if (tmp->to_thread != thread) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002507 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05302508 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002509 proc->pid, thread->pid, tmp->debug_id,
2510 tmp->to_proc ? tmp->to_proc->pid : 0,
2511 tmp->to_thread ?
2512 tmp->to_thread->pid : 0);
Todd Kjos2f993e22017-05-12 14:42:55 -07002513 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002514 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002515 return_error_param = -EPROTO;
2516 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002517 goto err_bad_call_stack;
2518 }
2519 while (tmp) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002520 struct binder_thread *from;
2521
2522 spin_lock(&tmp->lock);
2523 from = tmp->from;
2524 if (from && from->proc == target_proc) {
2525 atomic_inc(&from->tmp_ref);
2526 target_thread = from;
2527 spin_unlock(&tmp->lock);
2528 break;
2529 }
2530 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002531 tmp = tmp->from_parent;
2532 }
2533 }
2534 }
2535 if (target_thread) {
2536 e->to_thread = target_thread->pid;
2537 target_list = &target_thread->todo;
2538 target_wait = &target_thread->wait;
2539 } else {
2540 target_list = &target_proc->todo;
2541 target_wait = &target_proc->wait;
2542 }
2543 e->to_proc = target_proc->pid;
2544
2545 /* TODO: reuse incoming transaction for reply */
2546 t = kzalloc(sizeof(*t), GFP_KERNEL);
2547 if (t == NULL) {
2548 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002549 return_error_param = -ENOMEM;
2550 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002551 goto err_alloc_t_failed;
2552 }
2553 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos2f993e22017-05-12 14:42:55 -07002554 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002555
2556 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2557 if (tcomplete == NULL) {
2558 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002559 return_error_param = -ENOMEM;
2560 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002561 goto err_alloc_tcomplete_failed;
2562 }
2563 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2564
Todd Kjos1cfe6272017-05-24 13:33:28 -07002565 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002566
2567 if (reply)
2568 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002569 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002570 proc->pid, thread->pid, t->debug_id,
2571 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002572 (u64)tr->data.ptr.buffer,
2573 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002574 (u64)tr->data_size, (u64)tr->offsets_size,
2575 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002576 else
2577 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen59878d72016-09-30 14:05:40 +02002578 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002579 proc->pid, thread->pid, t->debug_id,
2580 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002581 (u64)tr->data.ptr.buffer,
2582 (u64)tr->data.ptr.offsets,
Martijn Coenen59878d72016-09-30 14:05:40 +02002583 (u64)tr->data_size, (u64)tr->offsets_size,
2584 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002585
2586 if (!reply && !(tr->flags & TF_ONE_WAY))
2587 t->from = thread;
2588 else
2589 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03002590 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002591 t->to_proc = target_proc;
2592 t->to_thread = target_thread;
2593 t->code = tr->code;
2594 t->flags = tr->flags;
2595 t->priority = task_nice(current);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002596
2597 trace_binder_transaction(reply, t, target_node);
2598
Todd Kjosd325d372016-10-10 10:40:53 -07002599 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen59878d72016-09-30 14:05:40 +02002600 tr->offsets_size, extra_buffers_size,
2601 !reply && (t->flags & TF_ONE_WAY));
Todd Kjose598d172017-03-22 17:19:52 -07002602 if (IS_ERR(t->buffer)) {
2603 /*
2604 * -ESRCH indicates VMA cleared. The target is dying.
2605 */
2606 return_error_param = PTR_ERR(t->buffer);
2607 return_error = return_error_param == -ESRCH ?
2608 BR_DEAD_REPLY : BR_FAILED_REPLY;
2609 return_error_line = __LINE__;
2610 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002611 goto err_binder_alloc_buf_failed;
2612 }
2613 t->buffer->allow_user_free = 0;
2614 t->buffer->debug_id = t->debug_id;
2615 t->buffer->transaction = t;
2616 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002617 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002618 off_start = (binder_size_t *)(t->buffer->data +
2619 ALIGN(tr->data_size, sizeof(void *)));
2620 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002621
Arve Hjønnevågda498892014-02-21 14:40:26 -08002622 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2623 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302624 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2625 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002626 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002627 return_error_param = -EFAULT;
2628 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002629 goto err_copy_data_failed;
2630 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002631 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2632 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302633 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2634 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002635 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002636 return_error_param = -EFAULT;
2637 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002638 goto err_copy_data_failed;
2639 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08002640 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2641 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2642 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002643 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002644 return_error_param = -EINVAL;
2645 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002646 goto err_bad_offset;
2647 }
Martijn Coenen5a6da532016-09-30 14:10:07 +02002648 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2649 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2650 proc->pid, thread->pid,
Amit Pundir44cbb182017-02-01 12:53:45 +05302651 (u64)extra_buffers_size);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002652 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002653 return_error_param = -EINVAL;
2654 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002655 goto err_bad_offset;
2656 }
2657 off_end = (void *)off_start + tr->offsets_size;
2658 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2659 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002660 off_min = 0;
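	/*
	 * Buffer layout at this point: the target buffer carved out above is
	 *
	 *	t->buffer->data   transaction data      (tr->data_size bytes)
	 *	                  pad to sizeof(void *)
	 *	off_start         offsets array         (tr->offsets_size bytes)
	 *	                  pad to sizeof(void *)
	 *	sg_bufp           scatter-gather space  (extra_buffers_size
	 *	                                         bytes, *_SG commands only)
	 *
	 * The loop below walks the offsets array; every entry must locate a
	 * valid object header inside the data area, at strictly increasing,
	 * non-overlapping offsets (enforced via off_min).
	 */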
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002661 for (; offp < off_end; offp++) {
Martijn Coenen00c80372016-07-13 12:06:49 +02002662 struct binder_object_header *hdr;
2663 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09002664
Martijn Coenen00c80372016-07-13 12:06:49 +02002665 if (object_size == 0 || *offp < off_min) {
2666 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002667 proc->pid, thread->pid, (u64)*offp,
2668 (u64)off_min,
Martijn Coenen00c80372016-07-13 12:06:49 +02002669 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002670 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002671 return_error_param = -EINVAL;
2672 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002673 goto err_bad_offset;
2674 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002675
2676 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2677 off_min = *offp + object_size;
2678 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002679 case BINDER_TYPE_BINDER:
2680 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002681 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09002682
Martijn Coenen00c80372016-07-13 12:06:49 +02002683 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002684 ret = binder_translate_binder(fp, t, thread);
2685 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02002686 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002687 return_error_param = ret;
2688 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002689 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002690 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002691 } break;
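		/*
		 * binder_translate_binder() rewrites a node owned by the
		 * sender into a handle valid in the target process, while
		 * binder_translate_handle() below does the reverse: a handle
		 * becomes the target's own node pointer if the target owns
		 * the node, or a fresh handle in the target otherwise.
		 */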
2692 case BINDER_TYPE_HANDLE:
2693 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002694 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002695
Martijn Coenen00c80372016-07-13 12:06:49 +02002696 fp = to_flat_binder_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002697 ret = binder_translate_handle(fp, t, thread);
2698 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002699 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002700 return_error_param = ret;
2701 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002702 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002703 }
2704 } break;
2705
2706 case BINDER_TYPE_FD: {
Martijn Coenen00c80372016-07-13 12:06:49 +02002707 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002708 int target_fd = binder_translate_fd(fp->fd, t, thread,
2709 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002710
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002711 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002712 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002713 return_error_param = target_fd;
2714 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002715 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002716 }
Martijn Coenen00c80372016-07-13 12:06:49 +02002717 fp->pad_binder = 0;
2718 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002719 } break;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002720 case BINDER_TYPE_FDA: {
2721 struct binder_fd_array_object *fda =
2722 to_binder_fd_array_object(hdr);
2723 struct binder_buffer_object *parent =
2724 binder_validate_ptr(t->buffer, fda->parent,
2725 off_start,
2726 offp - off_start);
2727 if (!parent) {
2728 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2729 proc->pid, thread->pid);
2730 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002731 return_error_param = -EINVAL;
2732 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002733 goto err_bad_parent;
2734 }
2735 if (!binder_validate_fixup(t->buffer, off_start,
2736 parent, fda->parent_offset,
2737 last_fixup_obj,
2738 last_fixup_min_off)) {
2739 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2740 proc->pid, thread->pid);
2741 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002742 return_error_param = -EINVAL;
2743 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002744 goto err_bad_parent;
2745 }
2746 ret = binder_translate_fd_array(fda, parent, t, thread,
2747 in_reply_to);
2748 if (ret < 0) {
2749 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002750 return_error_param = ret;
2751 return_error_line = __LINE__;
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002752 goto err_translate_failed;
2753 }
2754 last_fixup_obj = parent;
2755 last_fixup_min_off =
2756 fda->parent_offset + sizeof(u32) * fda->num_fds;
2757 } break;
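		/*
		 * A BINDER_TYPE_FDA object carries no descriptors itself: it
		 * names a parent BINDER_TYPE_PTR buffer and an offset where
		 * an array of fda->num_fds u32 fds lives, and
		 * binder_translate_fd_array() rewrites each entry in place to
		 * a descriptor installed in the target process.
		 */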
Martijn Coenen5a6da532016-09-30 14:10:07 +02002758 case BINDER_TYPE_PTR: {
2759 struct binder_buffer_object *bp =
2760 to_binder_buffer_object(hdr);
2761 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002762
Martijn Coenen5a6da532016-09-30 14:10:07 +02002763 if (bp->length > buf_left) {
2764 binder_user_error("%d:%d got transaction with too large buffer\n",
2765 proc->pid, thread->pid);
2766 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002767 return_error_param = -EINVAL;
2768 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002769 goto err_bad_offset;
2770 }
2771 if (copy_from_user(sg_bufp,
2772 (const void __user *)(uintptr_t)
2773 bp->buffer, bp->length)) {
2774				binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
2775 proc->pid, thread->pid);
Todd Kjose598d172017-03-22 17:19:52 -07002776 return_error_param = -EFAULT;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002777 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002778 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002779 goto err_copy_data_failed;
2780 }
2781			/* Fix up the buffer pointer to the target proc's address space */
2782 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjosd325d372016-10-10 10:40:53 -07002783 binder_alloc_get_user_buffer_offset(
2784 &target_proc->alloc);
Martijn Coenen5a6da532016-09-30 14:10:07 +02002785 sg_bufp += ALIGN(bp->length, sizeof(u64));
2786
2787 ret = binder_fixup_parent(t, thread, bp, off_start,
2788 offp - off_start,
2789 last_fixup_obj,
2790 last_fixup_min_off);
2791 if (ret < 0) {
2792 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002793 return_error_param = ret;
2794 return_error_line = __LINE__;
Martijn Coenen5a6da532016-09-30 14:10:07 +02002795 goto err_translate_failed;
2796 }
2797 last_fixup_obj = bp;
2798 last_fixup_min_off = 0;
2799 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002800 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002801 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenen00c80372016-07-13 12:06:49 +02002802 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002803 return_error = BR_FAILED_REPLY;
Todd Kjose598d172017-03-22 17:19:52 -07002804 return_error_param = -EINVAL;
2805 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002806 goto err_bad_object_type;
2807 }
2808 }
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002809 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002810 binder_enqueue_work(proc, tcomplete, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002811 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002812
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002813 if (reply) {
Todd Kjos2f993e22017-05-12 14:42:55 -07002814 if (target_thread->is_dead)
2815 goto err_dead_proc_or_thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002816 BUG_ON(t->buffer->async_transaction != 0);
2817 binder_pop_transaction(target_thread, in_reply_to);
Todd Kjos21ef40a2017-03-30 18:02:13 -07002818 binder_free_transaction(in_reply_to);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002819 binder_enqueue_work(target_proc, &t->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002820 } else if (!(t->flags & TF_ONE_WAY)) {
2821 BUG_ON(t->buffer->async_transaction != 0);
2822 t->need_reply = 1;
2823 t->from_parent = thread->transaction_stack;
2824 thread->transaction_stack = t;
Todd Kjos2f993e22017-05-12 14:42:55 -07002825 if (target_proc->is_dead ||
2826 (target_thread && target_thread->is_dead)) {
2827 binder_pop_transaction(thread, t);
2828 goto err_dead_proc_or_thread;
2829 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002830 binder_enqueue_work(target_proc, &t->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002831 } else {
2832 BUG_ON(target_node == NULL);
2833 BUG_ON(t->buffer->async_transaction != 1);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002834 binder_node_lock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002835 if (target_node->has_async_transaction) {
2836 target_list = &target_node->async_todo;
2837 target_wait = NULL;
2838 } else
2839 target_node->has_async_transaction = 1;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002840 /*
2841 * Test/set of has_async_transaction
2842 * must be atomic with enqueue on
2843 * async_todo
2844 */
Todd Kjos2f993e22017-05-12 14:42:55 -07002845 if (target_proc->is_dead ||
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002846 (target_thread && target_thread->is_dead)) {
2847 binder_node_unlock(target_node);
Todd Kjos2f993e22017-05-12 14:42:55 -07002848 goto err_dead_proc_or_thread;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07002849 }
2850 binder_enqueue_work(target_proc, &t->work, target_list);
2851 binder_node_unlock(target_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002852 }
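	/*
	 * Three delivery paths above: a reply is queued straight to the
	 * waiting thread, a synchronous transaction is first pushed on the
	 * caller's transaction stack so the reply can find its way back, and
	 * a one-way transaction is diverted to the node's async_todo whenever
	 * another async transaction is already in flight on that node.
	 */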
Riley Andrewsb5968812015-09-01 12:42:07 -07002853 if (target_wait) {
Todd Kjos8dedb0c2017-05-09 08:31:32 -07002854 if (reply || !(tr->flags & TF_ONE_WAY))
Riley Andrewsb5968812015-09-01 12:42:07 -07002855 wake_up_interruptible_sync(target_wait);
2856 else
2857 wake_up_interruptible(target_wait);
2858 }
Todd Kjos2f993e22017-05-12 14:42:55 -07002859 if (target_thread)
2860 binder_thread_dec_tmpref(target_thread);
2861 binder_proc_dec_tmpref(target_proc);
Todd Kjos1cfe6272017-05-24 13:33:28 -07002862 /*
2863 * write barrier to synchronize with initialization
2864 * of log entry
2865 */
2866 smp_wmb();
2867 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002868 return;
2869
Todd Kjos2f993e22017-05-12 14:42:55 -07002870err_dead_proc_or_thread:
2871 return_error = BR_DEAD_REPLY;
2872 return_error_line = __LINE__;
Martijn Coenend82cb8b2016-09-29 15:38:14 +02002873err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002874err_bad_object_type:
2875err_bad_offset:
Martijn Coenene3e0f4802016-10-18 13:58:55 +02002876err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002877err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002878 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002879 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjosc37162d2017-05-26 11:56:29 -07002880 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002881 t->buffer->transaction = NULL;
Todd Kjosd325d372016-10-10 10:40:53 -07002882 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002883err_binder_alloc_buf_failed:
2884 kfree(tcomplete);
2885 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2886err_alloc_tcomplete_failed:
2887 kfree(t);
2888 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2889err_alloc_t_failed:
2890err_bad_call_stack:
2891err_empty_call_stack:
2892err_dead_binder:
2893err_invalid_target_handle:
2894err_no_context_mgr_node:
Todd Kjos2f993e22017-05-12 14:42:55 -07002895 if (target_thread)
2896 binder_thread_dec_tmpref(target_thread);
2897 if (target_proc)
2898 binder_proc_dec_tmpref(target_proc);
Todd Kjosc37162d2017-05-26 11:56:29 -07002899 if (target_node)
2900 binder_dec_node(target_node, 1, 0);
2901
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002902 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjose598d172017-03-22 17:19:52 -07002903 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2904 proc->pid, thread->pid, return_error, return_error_param,
2905 (u64)tr->data_size, (u64)tr->offsets_size,
2906 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002907
2908 {
2909 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09002910
Todd Kjose598d172017-03-22 17:19:52 -07002911 e->return_error = return_error;
2912 e->return_error_param = return_error_param;
2913 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002914 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2915 *fe = *e;
Todd Kjos1cfe6272017-05-24 13:33:28 -07002916 /*
2917 * write barrier to synchronize with initialization
2918 * of log entry
2919 */
2920 smp_wmb();
2921 WRITE_ONCE(e->debug_id_done, t_debug_id);
2922 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002923 }
2924
Todd Kjos858b8da2017-04-21 17:35:12 -07002925 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002926 if (in_reply_to) {
Todd Kjos858b8da2017-04-21 17:35:12 -07002927 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002928 binder_enqueue_work(thread->proc,
2929 &thread->return_error.work,
2930 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002931 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos858b8da2017-04-21 17:35:12 -07002932 } else {
2933 thread->return_error.cmd = return_error;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07002934 binder_enqueue_work(thread->proc,
2935 &thread->return_error.work,
2936 &thread->todo);
Todd Kjos858b8da2017-04-21 17:35:12 -07002937 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002938}
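/*
 * Illustrative userspace view (hypothetical client code, not part of this
 * driver): a call reaches binder_transaction() as a BC_TRANSACTION record
 * packed into the write side of a BINDER_WRITE_READ ioctl, e.g.:
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_transaction_data tr;
 *	} __attribute__((packed)) w = {
 *		.cmd = BC_TRANSACTION,
 *		.tr = {
 *			.target.handle	  = handle,
 *			.code		  = method_code,
 *			.data_size	  = data_len,
 *			.offsets_size	  = noffs * sizeof(binder_size_t),
 *			.data.ptr.buffer  = (binder_uintptr_t)data,
 *			.data.ptr.offsets = (binder_uintptr_t)offs,
 *		},
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size   = sizeof(w),
 *		.write_buffer = (binder_uintptr_t)&w,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * handle, method_code, data, offs and binder_fd are placeholders; the
 * sender then reads back BR_TRANSACTION_COMPLETE and, for a synchronous
 * call, eventually BR_REPLY.
 */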
2939
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02002940static int binder_thread_write(struct binder_proc *proc,
2941 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002942 binder_uintptr_t binder_buffer, size_t size,
2943 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002944{
2945 uint32_t cmd;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02002946 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002947 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002948 void __user *ptr = buffer + *consumed;
2949 void __user *end = buffer + size;
2950
Todd Kjos858b8da2017-04-21 17:35:12 -07002951 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjosb0117bb2017-05-08 09:16:27 -07002952 int ret;
2953
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002954 if (get_user(cmd, (uint32_t __user *)ptr))
2955 return -EFAULT;
2956 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002957 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002958 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07002959 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
2960 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
2961 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002962 }
2963 switch (cmd) {
2964 case BC_INCREFS:
2965 case BC_ACQUIRE:
2966 case BC_RELEASE:
2967 case BC_DECREFS: {
2968 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002969 const char *debug_string;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002970 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
2971 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
2972 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002973
2974 if (get_user(target, (uint32_t __user *)ptr))
2975 return -EFAULT;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002976
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002977 ptr += sizeof(uint32_t);
Todd Kjosb0117bb2017-05-08 09:16:27 -07002978 ret = -1;
2979 if (increment && !target) {
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002980 struct binder_node *ctx_mgr_node;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002981 mutex_lock(&context->context_mgr_node_lock);
2982 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjosb0117bb2017-05-08 09:16:27 -07002983 if (ctx_mgr_node)
2984 ret = binder_inc_ref_for_node(
2985 proc, ctx_mgr_node,
2986 strong, NULL, &rdata);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07002987 mutex_unlock(&context->context_mgr_node_lock);
2988 }
Todd Kjosb0117bb2017-05-08 09:16:27 -07002989 if (ret)
2990 ret = binder_update_ref_for_handle(
2991 proc, target, increment, strong,
2992 &rdata);
2993 if (!ret && rdata.desc != target) {
2994 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
2995 proc->pid, thread->pid,
2996 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002997 }
2998 switch (cmd) {
2999 case BC_INCREFS:
3000 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003001 break;
3002 case BC_ACQUIRE:
3003 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003004 break;
3005 case BC_RELEASE:
3006 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003007 break;
3008 case BC_DECREFS:
3009 default:
3010 debug_string = "DecRefs";
Todd Kjosb0117bb2017-05-08 09:16:27 -07003011 break;
3012 }
3013 if (ret) {
3014 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3015 proc->pid, thread->pid, debug_string,
3016 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003017 break;
3018 }
3019 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosb0117bb2017-05-08 09:16:27 -07003020 "%d:%d %s ref %d desc %d s %d w %d\n",
3021 proc->pid, thread->pid, debug_string,
3022 rdata.debug_id, rdata.desc, rdata.strong,
3023 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003024 break;
3025 }
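		/*
		 * Wire format for these four commands is simply
		 * [u32 cmd][u32 handle]: a client pins a remote object with
		 * { BC_ACQUIRE, handle } and balances it later with
		 * { BC_RELEASE, handle }. Handle 0 is special-cased above as
		 * the context manager.
		 */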
3026 case BC_INCREFS_DONE:
3027 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003028 binder_uintptr_t node_ptr;
3029 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003030 struct binder_node *node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003031 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003032
Arve Hjønnevågda498892014-02-21 14:40:26 -08003033 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003034 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003035 ptr += sizeof(binder_uintptr_t);
3036 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003037 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003038 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003039 node = binder_get_node(proc, node_ptr);
3040 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003041 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003042 proc->pid, thread->pid,
3043 cmd == BC_INCREFS_DONE ?
3044 "BC_INCREFS_DONE" :
3045 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003046 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003047 break;
3048 }
3049 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003050 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003051 proc->pid, thread->pid,
3052 cmd == BC_INCREFS_DONE ?
3053 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003054 (u64)node_ptr, node->debug_id,
3055 (u64)cookie, (u64)node->cookie);
Todd Kjosf22abc72017-05-09 11:08:05 -07003056 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003057 break;
3058 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003059 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003060 if (cmd == BC_ACQUIRE_DONE) {
3061 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303062 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003063 proc->pid, thread->pid,
3064 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003065 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003066 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003067 break;
3068 }
3069 node->pending_strong_ref = 0;
3070 } else {
3071 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303072 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003073 proc->pid, thread->pid,
3074 node->debug_id);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003075 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003076 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003077 break;
3078 }
3079 node->pending_weak_ref = 0;
3080 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003081 free_node = binder_dec_node_nilocked(node,
3082 cmd == BC_ACQUIRE_DONE, 0);
3083 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003084 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosf22abc72017-05-09 11:08:05 -07003085 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003086 proc->pid, thread->pid,
3087 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosf22abc72017-05-09 11:08:05 -07003088 node->debug_id, node->local_strong_refs,
3089 node->local_weak_refs, node->tmp_refs);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003090 binder_node_inner_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07003091 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003092 break;
3093 }
3094 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303095 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003096 return -EINVAL;
3097 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303098 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003099 return -EINVAL;
3100
3101 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003102 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003103 struct binder_buffer *buffer;
3104
Arve Hjønnevågda498892014-02-21 14:40:26 -08003105 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003106 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003107 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003108
Todd Kjos076072a2017-04-21 14:32:11 -07003109 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3110 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003111 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003112 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3113 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003114 break;
3115 }
3116 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003117 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3118 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003119 break;
3120 }
3121 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003122 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3123 proc->pid, thread->pid, (u64)data_ptr,
3124 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003125 buffer->transaction ? "active" : "finished");
3126
3127 if (buffer->transaction) {
3128 buffer->transaction->buffer = NULL;
3129 buffer->transaction = NULL;
3130 }
3131 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003132 struct binder_node *buf_node;
3133 struct binder_work *w;
3134
3135 buf_node = buffer->target_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003136 binder_node_inner_lock(buf_node);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003137 BUG_ON(!buf_node->has_async_transaction);
3138 BUG_ON(buf_node->proc != proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003139 w = binder_dequeue_work_head_ilocked(
3140 &buf_node->async_todo);
3141 if (!w)
3142 buf_node->has_async_transaction = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003143 else
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003144 binder_enqueue_work_ilocked(
3145 w, &thread->todo);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003146 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003147 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003148 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003149 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjosd325d372016-10-10 10:40:53 -07003150 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003151 break;
3152 }
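		/*
		 * The data_ptr looked up above is the same user-space address
		 * the driver reported in tr.data.ptr.buffer when delivering
		 * BR_TRANSACTION/BR_REPLY, so a typical client frees a
		 * consumed buffer by writing the packed record
		 * { BC_FREE_BUFFER, tr.data.ptr.buffer } on its next
		 * BINDER_WRITE_READ.
		 */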
3153
Martijn Coenen5a6da532016-09-30 14:10:07 +02003154 case BC_TRANSACTION_SG:
3155 case BC_REPLY_SG: {
3156 struct binder_transaction_data_sg tr;
3157
3158 if (copy_from_user(&tr, ptr, sizeof(tr)))
3159 return -EFAULT;
3160 ptr += sizeof(tr);
3161 binder_transaction(proc, thread, &tr.transaction_data,
3162 cmd == BC_REPLY_SG, tr.buffers_size);
3163 break;
3164 }
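		/*
		 * The _SG variants differ from plain BC_TRANSACTION/BC_REPLY
		 * only by a trailing field:
		 *
		 *	struct binder_transaction_data_sg {
		 *		struct binder_transaction_data transaction_data;
		 *		binder_size_t buffers_size;
		 *	};
		 *
		 * buffers_size reserves the scatter-gather area that
		 * BINDER_TYPE_PTR objects are copied into during
		 * binder_transaction().
		 */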
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003165 case BC_TRANSACTION:
3166 case BC_REPLY: {
3167 struct binder_transaction_data tr;
3168
3169 if (copy_from_user(&tr, ptr, sizeof(tr)))
3170 return -EFAULT;
3171 ptr += sizeof(tr);
Martijn Coenen59878d72016-09-30 14:05:40 +02003172 binder_transaction(proc, thread, &tr,
3173 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003174 break;
3175 }
3176
3177 case BC_REGISTER_LOOPER:
3178 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303179 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003180 proc->pid, thread->pid);
3181 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3182 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303183 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003184 proc->pid, thread->pid);
3185 } else if (proc->requested_threads == 0) {
3186 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303187 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003188 proc->pid, thread->pid);
3189 } else {
3190 proc->requested_threads--;
3191 proc->requested_threads_started++;
3192 }
3193 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3194 break;
3195 case BC_ENTER_LOOPER:
3196 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303197 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003198 proc->pid, thread->pid);
3199 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3200 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303201 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003202 proc->pid, thread->pid);
3203 }
3204 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3205 break;
3206 case BC_EXIT_LOOPER:
3207 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303208 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003209 proc->pid, thread->pid);
3210 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3211 break;
3212
3213 case BC_REQUEST_DEATH_NOTIFICATION:
3214 case BC_CLEAR_DEATH_NOTIFICATION: {
3215 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003216 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003217 struct binder_ref *ref;
3218 struct binder_ref_death *death;
3219
3220 if (get_user(target, (uint32_t __user *)ptr))
3221 return -EFAULT;
3222 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003223 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003224 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003225 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003226 ref = binder_get_ref(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003227 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303228 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003229 proc->pid, thread->pid,
3230 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3231 "BC_REQUEST_DEATH_NOTIFICATION" :
3232 "BC_CLEAR_DEATH_NOTIFICATION",
3233 target);
3234 break;
3235 }
3236
3237 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003238 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003239 proc->pid, thread->pid,
3240 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3241 "BC_REQUEST_DEATH_NOTIFICATION" :
3242 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjosb0117bb2017-05-08 09:16:27 -07003243 (u64)cookie, ref->data.debug_id,
3244 ref->data.desc, ref->data.strong,
3245 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003246
3247 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3248 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303249 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003250 proc->pid, thread->pid);
3251 break;
3252 }
3253 death = kzalloc(sizeof(*death), GFP_KERNEL);
3254 if (death == NULL) {
Todd Kjos858b8da2017-04-21 17:35:12 -07003255 WARN_ON(thread->return_error.cmd !=
3256 BR_OK);
3257 thread->return_error.cmd = BR_ERROR;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003258 binder_enqueue_work(
3259 thread->proc,
3260 &thread->return_error.work,
3261 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003262 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303263 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003264 proc->pid, thread->pid);
3265 break;
3266 }
3267 binder_stats_created(BINDER_STAT_DEATH);
3268 INIT_LIST_HEAD(&death->work.entry);
3269 death->cookie = cookie;
3270 ref->death = death;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003271 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003272 if (ref->node->proc == NULL) {
3273 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003274 if (thread->looper &
3275 (BINDER_LOOPER_STATE_REGISTERED |
3276 BINDER_LOOPER_STATE_ENTERED))
3277 binder_enqueue_work(
3278 proc,
3279 &ref->death->work,
3280 &thread->todo);
3281 else {
3282 binder_enqueue_work(
3283 proc,
3284 &ref->death->work,
3285 &proc->todo);
3286 wake_up_interruptible(
3287 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003288 }
3289 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003290 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003291 } else {
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003292 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003293 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303294 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003295 proc->pid, thread->pid);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003296 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003297 break;
3298 }
3299 death = ref->death;
3300 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003301 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003302 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003303 (u64)death->cookie,
3304 (u64)cookie);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003305 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003306 break;
3307 }
3308 ref->death = NULL;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003309 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003310 if (list_empty(&death->work.entry)) {
3311 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003312 if (thread->looper &
3313 (BINDER_LOOPER_STATE_REGISTERED |
3314 BINDER_LOOPER_STATE_ENTERED))
3315 binder_enqueue_work_ilocked(
3316 &death->work,
3317 &thread->todo);
3318 else {
3319 binder_enqueue_work_ilocked(
3320 &death->work,
3321 &proc->todo);
3322 wake_up_interruptible(
3323 &proc->wait);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003324 }
3325 } else {
3326 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3327 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3328 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003329 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003330 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003331 }
3332 } break;
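		/*
		 * Death-notification protocol sketch: a client writes
		 * { BC_REQUEST_DEATH_NOTIFICATION, handle, cookie }; when the
		 * node's owner dies it reads back { BR_DEAD_BINDER, cookie }
		 * and acknowledges with { BC_DEAD_BINDER_DONE, cookie },
		 * handled below. It may cancel at any time with
		 * { BC_CLEAR_DEATH_NOTIFICATION, handle, cookie }, confirmed
		 * by BR_CLEAR_DEATH_NOTIFICATION_DONE.
		 */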
3333 case BC_DEAD_BINDER_DONE: {
3334 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003335 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003336 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09003337
Arve Hjønnevågda498892014-02-21 14:40:26 -08003338 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003339 return -EFAULT;
3340
Lisa Du7a64cd82016-02-17 09:32:52 +08003341 ptr += sizeof(cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003342 binder_inner_proc_lock(proc);
3343 list_for_each_entry(w, &proc->delivered_death,
3344 entry) {
3345 struct binder_ref_death *tmp_death =
3346 container_of(w,
3347 struct binder_ref_death,
3348 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09003349
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003350 if (tmp_death->cookie == cookie) {
3351 death = tmp_death;
3352 break;
3353 }
3354 }
3355 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003356 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3357 proc->pid, thread->pid, (u64)cookie,
3358 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003359 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003360 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3361 proc->pid, thread->pid, (u64)cookie);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003362 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003363 break;
3364 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003365 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003366 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3367 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003368 if (thread->looper &
3369 (BINDER_LOOPER_STATE_REGISTERED |
3370 BINDER_LOOPER_STATE_ENTERED))
3371 binder_enqueue_work_ilocked(
3372 &death->work, &thread->todo);
3373 else {
3374 binder_enqueue_work_ilocked(
3375 &death->work,
3376 &proc->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003377 wake_up_interruptible(&proc->wait);
3378 }
3379 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003380 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003381 } break;
3382
3383 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303384 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003385 proc->pid, thread->pid, cmd);
3386 return -EINVAL;
3387 }
3388 *consumed = ptr - buffer;
3389 }
3390 return 0;
3391}
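/*
 * Looper handshake as seen from userspace (hypothetical example, not part
 * of this driver): the main thread announces itself with BC_ENTER_LOOPER
 * before reading; whenever a read returns BR_SPAWN_LOOPER, the process
 * starts a new thread that registers with BC_REGISTER_LOOPER, keeping
 * requested_threads_started below max_threads (configured via the
 * BINDER_SET_MAX_THREADS ioctl).
 */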
3392
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003393static void binder_stat_br(struct binder_proc *proc,
3394 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003395{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003396 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003397 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07003398 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3399 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3400 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003401 }
3402}
3403
3404static int binder_has_proc_work(struct binder_proc *proc,
3405 struct binder_thread *thread)
3406{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003407 return !binder_worklist_empty(proc, &proc->todo) ||
3408 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003409}
3410
3411static int binder_has_thread_work(struct binder_thread *thread)
3412{
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003413 return !binder_worklist_empty(thread->proc, &thread->todo) ||
3414 thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003415}
3416
Todd Kjos60792612017-05-24 10:51:01 -07003417static int binder_put_node_cmd(struct binder_proc *proc,
3418 struct binder_thread *thread,
3419 void __user **ptrp,
3420 binder_uintptr_t node_ptr,
3421 binder_uintptr_t node_cookie,
3422 int node_debug_id,
3423 uint32_t cmd, const char *cmd_name)
3424{
3425 void __user *ptr = *ptrp;
3426
3427 if (put_user(cmd, (uint32_t __user *)ptr))
3428 return -EFAULT;
3429 ptr += sizeof(uint32_t);
3430
3431 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3432 return -EFAULT;
3433 ptr += sizeof(binder_uintptr_t);
3434
3435 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3436 return -EFAULT;
3437 ptr += sizeof(binder_uintptr_t);
3438
3439 binder_stat_br(proc, thread, cmd);
3440 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3441 proc->pid, thread->pid, cmd_name, node_debug_id,
3442 (u64)node_ptr, (u64)node_cookie);
3443
3444 *ptrp = ptr;
3445 return 0;
3446}
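/*
 * Each record emitted by binder_put_node_cmd() is
 * [u32 cmd][binder_uintptr_t ptr][binder_uintptr_t cookie]. For BR_INCREFS
 * and BR_ACQUIRE the receiving process is expected to answer with
 * BC_INCREFS_DONE/BC_ACQUIRE_DONE carrying the same ptr/cookie pair, which
 * clears the pending_weak_ref/pending_strong_ref flags set in
 * binder_thread_read() below.
 */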
3447
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003448static int binder_thread_read(struct binder_proc *proc,
3449 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003450 binder_uintptr_t binder_buffer, size_t size,
3451 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003452{
Arve Hjønnevågda498892014-02-21 14:40:26 -08003453 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003454 void __user *ptr = buffer + *consumed;
3455 void __user *end = buffer + size;
3456
3457 int ret = 0;
3458 int wait_for_proc_work;
3459
3460 if (*consumed == 0) {
3461 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3462 return -EFAULT;
3463 ptr += sizeof(uint32_t);
3464 }
3465
3466retry:
3467 wait_for_proc_work = thread->transaction_stack == NULL &&
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003468 binder_worklist_empty(proc, &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003469
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003470 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3471 if (wait_for_proc_work)
3472 proc->ready_threads++;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003473
3474 binder_unlock(__func__);
3475
3476 trace_binder_wait_for_work(wait_for_proc_work,
3477 !!thread->transaction_stack,
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003478 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003479 if (wait_for_proc_work) {
3480 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3481 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303482 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003483 proc->pid, thread->pid, thread->looper);
3484 wait_event_interruptible(binder_user_error_wait,
3485 binder_stop_on_user_error < 2);
3486 }
3487 binder_set_nice(proc->default_priority);
3488 if (non_block) {
3489 if (!binder_has_proc_work(proc, thread))
3490 ret = -EAGAIN;
3491 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003492 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003493 } else {
3494 if (non_block) {
3495 if (!binder_has_thread_work(thread))
3496 ret = -EAGAIN;
3497 } else
Colin Crosse2610b22013-05-06 23:50:15 +00003498 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003499 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003500
3501 binder_lock(__func__);
3502
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003503 if (wait_for_proc_work)
3504 proc->ready_threads--;
3505 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3506
3507 if (ret)
3508 return ret;
3509
3510 while (1) {
3511 uint32_t cmd;
3512 struct binder_transaction_data tr;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003513 struct binder_work *w = NULL;
3514 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003515 struct binder_transaction *t = NULL;
Todd Kjos2f993e22017-05-12 14:42:55 -07003516 struct binder_thread *t_from;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003517
Todd Kjose7f23ed2017-03-21 13:06:01 -07003518 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003519 if (!binder_worklist_empty_ilocked(&thread->todo))
3520 list = &thread->todo;
3521 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3522 wait_for_proc_work)
3523 list = &proc->todo;
3524 else {
3525 binder_inner_proc_unlock(proc);
3526
Dmitry Voytik395262a2014-09-08 18:16:34 +04003527 /* no data added */
Todd Kjos6798e6d2017-01-06 14:19:25 -08003528 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003529 goto retry;
3530 break;
3531 }
3532
Todd Kjose7f23ed2017-03-21 13:06:01 -07003533 if (end - ptr < sizeof(tr) + 4) {
3534 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003535 break;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003536 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003537 w = binder_dequeue_work_head_ilocked(list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003538
3539 switch (w->type) {
3540 case BINDER_WORK_TRANSACTION: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003541 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003542 t = container_of(w, struct binder_transaction, work);
3543 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003544 case BINDER_WORK_RETURN_ERROR: {
3545 struct binder_error *e = container_of(
3546 w, struct binder_error, work);
3547
3548 WARN_ON(e->cmd == BR_OK);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003549 binder_inner_proc_unlock(proc);
Todd Kjos858b8da2017-04-21 17:35:12 -07003550 if (put_user(e->cmd, (uint32_t __user *)ptr))
3551 return -EFAULT;
3552 e->cmd = BR_OK;
3553 ptr += sizeof(uint32_t);
3554
3555 binder_stat_br(proc, thread, cmd);
Todd Kjos858b8da2017-04-21 17:35:12 -07003556 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003557 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjose7f23ed2017-03-21 13:06:01 -07003558 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003559 cmd = BR_TRANSACTION_COMPLETE;
3560 if (put_user(cmd, (uint32_t __user *)ptr))
3561 return -EFAULT;
3562 ptr += sizeof(uint32_t);
3563
3564 binder_stat_br(proc, thread, cmd);
3565 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303566 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003567 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003568 kfree(w);
3569 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3570 } break;
3571 case BINDER_WORK_NODE: {
3572 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos60792612017-05-24 10:51:01 -07003573 int strong, weak;
3574 binder_uintptr_t node_ptr = node->ptr;
3575 binder_uintptr_t node_cookie = node->cookie;
3576 int node_debug_id = node->debug_id;
3577 int has_weak_ref;
3578 int has_strong_ref;
3579 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09003580
Todd Kjos60792612017-05-24 10:51:01 -07003581 BUG_ON(proc != node->proc);
3582 strong = node->internal_strong_refs ||
3583 node->local_strong_refs;
3584 weak = !hlist_empty(&node->refs) ||
Todd Kjosf22abc72017-05-09 11:08:05 -07003585 node->local_weak_refs ||
3586 node->tmp_refs || strong;
Todd Kjos60792612017-05-24 10:51:01 -07003587 has_strong_ref = node->has_strong_ref;
3588 has_weak_ref = node->has_weak_ref;
3589
3590 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003591 node->has_weak_ref = 1;
3592 node->pending_weak_ref = 1;
3593 node->local_weak_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003594 }
3595 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003596 node->has_strong_ref = 1;
3597 node->pending_strong_ref = 1;
3598 node->local_strong_refs++;
Todd Kjos60792612017-05-24 10:51:01 -07003599 }
3600 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003601 node->has_strong_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003602 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003603 node->has_weak_ref = 0;
Todd Kjos60792612017-05-24 10:51:01 -07003604 if (!weak && !strong) {
3605 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3606 "%d:%d node %d u%016llx c%016llx deleted\n",
3607 proc->pid, thread->pid,
3608 node_debug_id,
3609 (u64)node_ptr,
3610 (u64)node_cookie);
3611 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003612 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07003613 binder_node_lock(node);
3614 /*
3615 * Acquire the node lock before freeing the
3616 * node to serialize with other threads that
3617 * may have been holding the node lock while
3618 * decrementing this node (avoids race where
3619 * this thread frees while the other thread
3620 * is unlocking the node after the final
3621 * decrement)
3622 */
3623 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003624 binder_free_node(node);
3625 } else
3626 binder_inner_proc_unlock(proc);
3627
Todd Kjos60792612017-05-24 10:51:01 -07003628 if (weak && !has_weak_ref)
3629 ret = binder_put_node_cmd(
3630 proc, thread, &ptr, node_ptr,
3631 node_cookie, node_debug_id,
3632 BR_INCREFS, "BR_INCREFS");
3633 if (!ret && strong && !has_strong_ref)
3634 ret = binder_put_node_cmd(
3635 proc, thread, &ptr, node_ptr,
3636 node_cookie, node_debug_id,
3637 BR_ACQUIRE, "BR_ACQUIRE");
3638 if (!ret && !strong && has_strong_ref)
3639 ret = binder_put_node_cmd(
3640 proc, thread, &ptr, node_ptr,
3641 node_cookie, node_debug_id,
3642 BR_RELEASE, "BR_RELEASE");
3643 if (!ret && !weak && has_weak_ref)
3644 ret = binder_put_node_cmd(
3645 proc, thread, &ptr, node_ptr,
3646 node_cookie, node_debug_id,
3647 BR_DECREFS, "BR_DECREFS");
3648 if (orig_ptr == ptr)
3649 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3650 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3651 proc->pid, thread->pid,
3652 node_debug_id,
3653 (u64)node_ptr,
3654 (u64)node_cookie);
3655 if (ret)
3656 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003657 } break;
3658 case BINDER_WORK_DEAD_BINDER:
3659 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3660 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3661 struct binder_ref_death *death;
3662 uint32_t cmd;
3663
3664 death = container_of(w, struct binder_ref_death, work);
3665 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3666 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3667 else
3668 cmd = BR_DEAD_BINDER;
Todd Kjose7f23ed2017-03-21 13:06:01 -07003669 /*
3670 * TODO: there is a race condition between
3671 * death notification requests and delivery
3672 * of the notifications. This will be handled
3673 * in a later patch.
3674 */
3675 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003676 if (put_user(cmd, (uint32_t __user *)ptr))
3677 return -EFAULT;
3678 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003679 if (put_user(death->cookie,
3680 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003681 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003682 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07003683 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003684 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003685 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003686 proc->pid, thread->pid,
3687 cmd == BR_DEAD_BINDER ?
3688 "BR_DEAD_BINDER" :
3689 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003690 (u64)death->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003691
3692 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003693 kfree(death);
3694 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003695 } else {
3696 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003697 binder_enqueue_work_ilocked(
3698 w, &proc->delivered_death);
Todd Kjose7f23ed2017-03-21 13:06:01 -07003699 binder_inner_proc_unlock(proc);
3700 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003701 if (cmd == BR_DEAD_BINDER)
3702 goto done; /* DEAD_BINDER notifications can cause transactions */
3703 } break;
3704 }
3705
3706 if (!t)
3707 continue;
3708
3709 BUG_ON(t->buffer == NULL);
3710 if (t->buffer->target_node) {
3711 struct binder_node *target_node = t->buffer->target_node;
Seunghun Lee10f62862014-05-01 01:30:23 +09003712
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003713 tr.target.ptr = target_node->ptr;
3714 tr.cookie = target_node->cookie;
3715 t->saved_priority = task_nice(current);
3716 if (t->priority < target_node->min_priority &&
3717 !(t->flags & TF_ONE_WAY))
3718 binder_set_nice(t->priority);
3719 else if (!(t->flags & TF_ONE_WAY) ||
3720 t->saved_priority > target_node->min_priority)
3721 binder_set_nice(target_node->min_priority);
3722 cmd = BR_TRANSACTION;
3723 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003724 tr.target.ptr = 0;
3725 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003726 cmd = BR_REPLY;
3727 }
3728 tr.code = t->code;
3729 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06003730 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003731
Todd Kjos2f993e22017-05-12 14:42:55 -07003732 t_from = binder_get_txn_from(t);
3733 if (t_from) {
3734 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09003735
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003736 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08003737 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003738 } else {
3739 tr.sender_pid = 0;
3740 }
3741
3742 tr.data_size = t->buffer->data_size;
3743 tr.offsets_size = t->buffer->offsets_size;
Todd Kjosd325d372016-10-10 10:40:53 -07003744 tr.data.ptr.buffer = (binder_uintptr_t)
3745 ((uintptr_t)t->buffer->data +
3746 binder_alloc_get_user_buffer_offset(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003747 tr.data.ptr.offsets = tr.data.ptr.buffer +
3748 ALIGN(t->buffer->data_size,
3749 sizeof(void *));
3750
Todd Kjos2f993e22017-05-12 14:42:55 -07003751 if (put_user(cmd, (uint32_t __user *)ptr)) {
3752 if (t_from)
3753 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003754 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07003755 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003756 ptr += sizeof(uint32_t);
Todd Kjos2f993e22017-05-12 14:42:55 -07003757 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3758 if (t_from)
3759 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003760 return -EFAULT;
Todd Kjos2f993e22017-05-12 14:42:55 -07003761 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003762 ptr += sizeof(tr);
3763
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003764 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003765 binder_stat_br(proc, thread, cmd);
3766 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003767 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003768 proc->pid, thread->pid,
3769 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3770 "BR_REPLY",
Todd Kjos2f993e22017-05-12 14:42:55 -07003771 t->debug_id, t_from ? t_from->proc->pid : 0,
3772 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003773 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003774 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003775
Todd Kjos2f993e22017-05-12 14:42:55 -07003776 if (t_from)
3777 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003778 t->buffer->allow_user_free = 1;
3779 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
3780 t->to_parent = thread->transaction_stack;
3781 t->to_thread = thread;
3782 thread->transaction_stack = t;
3783 } else {
Todd Kjos21ef40a2017-03-30 18:02:13 -07003784 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003785 }
3786 break;
3787 }
3788
3789done:
3790
3791 *consumed = ptr - buffer;
3792 if (proc->requested_threads + proc->ready_threads == 0 &&
3793 proc->requested_threads_started < proc->max_threads &&
3794 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3795 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
3796	     /* spawn a new thread if we leave this out */) {
3797 proc->requested_threads++;
3798 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303799 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003800 proc->pid, thread->pid);
3801 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3802 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07003803 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003804 }
3805 return 0;
3806}
3807
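/*
 * Illustrative user-space sketch (an assumption for exposition, not
 * part of the driver): a looper thread announcing itself with
 * BC_ENTER_LOOPER and then draining the stream that
 * binder_thread_read() produces. The buffer size and the BR_*
 * dispatch are deliberately simplified; only the ioctl, the struct
 * layout and the command constants come from the binder UAPI.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	static int looper_iteration(int fd)
 *	{
 *		uint32_t enter = BC_ENTER_LOOPER;
 *		char rbuf[256];
 *		struct binder_write_read bwr;
 *
 *		memset(&bwr, 0, sizeof(bwr));
 *		bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&enter;
 *		bwr.write_size = sizeof(enter);
 *		bwr.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf;
 *		bwr.read_size = sizeof(rbuf);
 *		if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *			return -1;
 *		// rbuf[0..read_consumed) now holds 32-bit BR_* codes, each
 *		// followed by its payload: BR_NOOP (no payload),
 *		// BR_TRANSACTION/BR_REPLY (a struct binder_transaction_data
 *		// filled in by the code above), or BR_SPAWN_LOOPER, which
 *		// asks user space to start one more thread.
 *		return (int)bwr.read_consumed;
 *	}
 */
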
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003808static void binder_release_work(struct binder_proc *proc,
3809 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003810{
3811 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09003812
Todd Kjos1c89e6b2016-10-20 10:33:00 -07003813 while (1) {
3814 w = binder_dequeue_work_head(proc, list);
3815 if (!w)
3816 return;
3817
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003818 switch (w->type) {
3819 case BINDER_WORK_TRANSACTION: {
3820 struct binder_transaction *t;
3821
3822 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003823 if (t->buffer->target_node &&
3824 !(t->flags & TF_ONE_WAY)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003825 binder_send_failed_reply(t, BR_DEAD_REPLY);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003826 } else {
3827 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303828 "undelivered transaction %d\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003829 t->debug_id);
Todd Kjos21ef40a2017-03-30 18:02:13 -07003830 binder_free_transaction(t);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003831 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003832 } break;
Todd Kjos858b8da2017-04-21 17:35:12 -07003833 case BINDER_WORK_RETURN_ERROR: {
3834 struct binder_error *e = container_of(
3835 w, struct binder_error, work);
3836
3837 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3838 "undelivered TRANSACTION_ERROR: %u\n",
3839 e->cmd);
3840 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003841 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003842 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303843 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003844 kfree(w);
3845 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3846 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003847 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3848 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3849 struct binder_ref_death *death;
3850
3851 death = container_of(w, struct binder_ref_death, work);
3852 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003853 "undelivered death notification, %016llx\n",
3854 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003855 kfree(death);
3856 binder_stats_deleted(BINDER_STAT_DEATH);
3857 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003858 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303859 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07003860 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003861 break;
3862 }
3863 }
3864
3865}
3866
Todd Kjosb4827902017-05-25 15:52:17 -07003867static struct binder_thread *binder_get_thread_ilocked(
3868 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003869{
3870 struct binder_thread *thread = NULL;
3871 struct rb_node *parent = NULL;
3872 struct rb_node **p = &proc->threads.rb_node;
3873
3874 while (*p) {
3875 parent = *p;
3876 thread = rb_entry(parent, struct binder_thread, rb_node);
3877
3878 if (current->pid < thread->pid)
3879 p = &(*p)->rb_left;
3880 else if (current->pid > thread->pid)
3881 p = &(*p)->rb_right;
3882 else
Todd Kjosb4827902017-05-25 15:52:17 -07003883 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003884 }
Todd Kjosb4827902017-05-25 15:52:17 -07003885 if (!new_thread)
3886 return NULL;
3887 thread = new_thread;
3888 binder_stats_created(BINDER_STAT_THREAD);
3889 thread->proc = proc;
3890 thread->pid = current->pid;
3891 atomic_set(&thread->tmp_ref, 0);
3892 init_waitqueue_head(&thread->wait);
3893 INIT_LIST_HEAD(&thread->todo);
3894 rb_link_node(&thread->rb_node, parent, p);
3895 rb_insert_color(&thread->rb_node, &proc->threads);
3896 thread->looper_need_return = true;
3897 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
3898 thread->return_error.cmd = BR_OK;
3899 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3900 thread->reply_error.cmd = BR_OK;
3901
3902 return thread;
3903}
3904
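/*
 * Two-phase lookup: proc->inner_lock is a spinlock, so the thread
 * struct cannot be allocated under it (kzalloc(GFP_KERNEL) may
 * sleep). Allocate outside the lock, then retry the lookup with the
 * candidate; if another thread won the race, free the spare.
 */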
3905static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3906{
3907 struct binder_thread *thread;
3908 struct binder_thread *new_thread;
3909
3910 binder_inner_proc_lock(proc);
3911 thread = binder_get_thread_ilocked(proc, NULL);
3912 binder_inner_proc_unlock(proc);
3913 if (!thread) {
3914 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3915 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003916 return NULL;
Todd Kjosb4827902017-05-25 15:52:17 -07003917 binder_inner_proc_lock(proc);
3918 thread = binder_get_thread_ilocked(proc, new_thread);
3919 binder_inner_proc_unlock(proc);
3920 if (thread != new_thread)
3921 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003922 }
3923 return thread;
3924}
3925
Todd Kjos2f993e22017-05-12 14:42:55 -07003926static void binder_free_proc(struct binder_proc *proc)
3927{
3928 BUG_ON(!list_empty(&proc->todo));
3929 BUG_ON(!list_empty(&proc->delivered_death));
3930 binder_alloc_deferred_release(&proc->alloc);
3931 put_task_struct(proc->tsk);
3932 binder_stats_deleted(BINDER_STAT_PROC);
3933 kfree(proc);
3934}
3935
3936static void binder_free_thread(struct binder_thread *thread)
3937{
3938 BUG_ON(!list_empty(&thread->todo));
3939 binder_stats_deleted(BINDER_STAT_THREAD);
3940 binder_proc_dec_tmpref(thread->proc);
3941 kfree(thread);
3942}
3943
3944static int binder_thread_release(struct binder_proc *proc,
3945 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003946{
3947 struct binder_transaction *t;
3948 struct binder_transaction *send_reply = NULL;
3949 int active_transactions = 0;
Todd Kjos2f993e22017-05-12 14:42:55 -07003950 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003951
Todd Kjosb4827902017-05-25 15:52:17 -07003952 binder_inner_proc_lock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07003953 /*
3954 * take a ref on the proc so it survives
3955 * after we remove this thread from proc->threads.
3956 * The corresponding dec is when we actually
3957 * free the thread in binder_free_thread()
3958 */
3959 proc->tmp_ref++;
3960 /*
3961 * take a ref on this thread to ensure it
3962 * survives while we are releasing it
3963 */
3964 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003965 rb_erase(&thread->rb_node, &proc->threads);
3966 t = thread->transaction_stack;
Todd Kjos2f993e22017-05-12 14:42:55 -07003967 if (t) {
3968 spin_lock(&t->lock);
3969 if (t->to_thread == thread)
3970 send_reply = t;
3971 }
3972 thread->is_dead = true;
3973
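	/*
	 * Unwind the transaction stack. last_t tracks the transaction
	 * whose lock is still held, because t is advanced to the
	 * parent entry before the current lock can be released.
	 */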
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003974 while (t) {
Todd Kjos2f993e22017-05-12 14:42:55 -07003975 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003976 active_transactions++;
3977 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303978 "release %d:%d transaction %d %s, still active\n",
3979 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003980 t->debug_id,
3981 (t->to_thread == thread) ? "in" : "out");
3982
3983 if (t->to_thread == thread) {
3984 t->to_proc = NULL;
3985 t->to_thread = NULL;
3986 if (t->buffer) {
3987 t->buffer->transaction = NULL;
3988 t->buffer = NULL;
3989 }
3990 t = t->to_parent;
3991 } else if (t->from == thread) {
3992 t->from = NULL;
3993 t = t->from_parent;
3994 } else
3995 BUG();
Todd Kjos2f993e22017-05-12 14:42:55 -07003996 spin_unlock(&last_t->lock);
3997 if (t)
3998 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003999 }
Todd Kjosb4827902017-05-25 15:52:17 -07004000 binder_inner_proc_unlock(thread->proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004001
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004002 if (send_reply)
4003 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004004 binder_release_work(proc, &thread->todo);
Todd Kjos2f993e22017-05-12 14:42:55 -07004005 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004006 return active_transactions;
4007}
4008
4009static unsigned int binder_poll(struct file *filp,
4010 struct poll_table_struct *wait)
4011{
4012 struct binder_proc *proc = filp->private_data;
4013 struct binder_thread *thread = NULL;
4014 int wait_for_proc_work;
4015
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004016 binder_lock(__func__);
4017
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004018	thread = binder_get_thread(proc);
	if (!thread) {
		/* thread allocation failed; don't dereference NULL below */
		binder_unlock(__func__);
		return POLLERR;
	}
4019
4020 wait_for_proc_work = thread->transaction_stack == NULL &&
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004021 binder_worklist_empty(proc, &thread->todo);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004022
4023 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004024
4025 if (wait_for_proc_work) {
4026 if (binder_has_proc_work(proc, thread))
4027 return POLLIN;
4028 poll_wait(filp, &proc->wait, wait);
4029 if (binder_has_proc_work(proc, thread))
4030 return POLLIN;
4031 } else {
4032 if (binder_has_thread_work(thread))
4033 return POLLIN;
4034 poll_wait(filp, &thread->wait, wait);
4035 if (binder_has_thread_work(thread))
4036 return POLLIN;
4037 }
4038 return 0;
4039}
4040
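/*
 * Illustrative sketch (assumption, not part of the driver): waiting
 * for binder work with poll(2) instead of blocking inside
 * BINDER_WRITE_READ. POLLIN means a subsequent read would find work.
 *
 *	#include <poll.h>
 *
 *	static int binder_work_pending(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		return poll(&pfd, 1, timeout_ms) > 0 &&
 *			(pfd.revents & POLLIN);
 *	}
 */
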
Tair Rzayev78260ac2014-06-03 22:27:21 +03004041static int binder_ioctl_write_read(struct file *filp,
4042 unsigned int cmd, unsigned long arg,
4043 struct binder_thread *thread)
4044{
4045 int ret = 0;
4046 struct binder_proc *proc = filp->private_data;
4047 unsigned int size = _IOC_SIZE(cmd);
4048 void __user *ubuf = (void __user *)arg;
4049 struct binder_write_read bwr;
4050
4051 if (size != sizeof(struct binder_write_read)) {
4052 ret = -EINVAL;
4053 goto out;
4054 }
4055 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4056 ret = -EFAULT;
4057 goto out;
4058 }
4059 binder_debug(BINDER_DEBUG_READ_WRITE,
4060 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4061 proc->pid, thread->pid,
4062 (u64)bwr.write_size, (u64)bwr.write_buffer,
4063 (u64)bwr.read_size, (u64)bwr.read_buffer);
4064
4065 if (bwr.write_size > 0) {
4066 ret = binder_thread_write(proc, thread,
4067 bwr.write_buffer,
4068 bwr.write_size,
4069 &bwr.write_consumed);
4070 trace_binder_write_done(ret);
4071 if (ret < 0) {
4072 bwr.read_consumed = 0;
4073 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4074 ret = -EFAULT;
4075 goto out;
4076 }
4077 }
4078 if (bwr.read_size > 0) {
4079 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4080 bwr.read_size,
4081 &bwr.read_consumed,
4082 filp->f_flags & O_NONBLOCK);
4083 trace_binder_read_done(ret);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004084 if (!binder_worklist_empty(proc, &proc->todo))
Tair Rzayev78260ac2014-06-03 22:27:21 +03004085 wake_up_interruptible(&proc->wait);
4086 if (ret < 0) {
4087 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4088 ret = -EFAULT;
4089 goto out;
4090 }
4091 }
4092 binder_debug(BINDER_DEBUG_READ_WRITE,
4093 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4094 proc->pid, thread->pid,
4095 (u64)bwr.write_consumed, (u64)bwr.write_size,
4096 (u64)bwr.read_consumed, (u64)bwr.read_size);
4097 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4098 ret = -EFAULT;
4099 goto out;
4100 }
4101out:
4102 return ret;
4103}
4104
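/*
 * Illustrative sketch (assumption, not part of the driver): a
 * write-only BINDER_WRITE_READ call, here returning a delivered
 * buffer to the driver with BC_FREE_BUFFER once user space is done
 * with it (the allow_user_free path in binder_thread_read()). The
 * command stream is a 32-bit BC_* code immediately followed by its
 * payload, hence the packed struct.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	static int free_binder_buffer(int fd, binder_uintptr_t buffer)
 *	{
 *		struct {
 *			uint32_t cmd;
 *			binder_uintptr_t ptr;
 *		} __attribute__((packed)) wcmd = { BC_FREE_BUFFER, buffer };
 *		struct binder_write_read bwr;
 *
 *		memset(&bwr, 0, sizeof(bwr));
 *		bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&wcmd;
 *		bwr.write_size = sizeof(wcmd);
 *		return ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	}
 */
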
4105static int binder_ioctl_set_ctx_mgr(struct file *filp)
4106{
4107 int ret = 0;
4108 struct binder_proc *proc = filp->private_data;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004109 struct binder_context *context = proc->context;
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004110 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004111 kuid_t curr_euid = current_euid();
4112
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004113 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004114 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004115 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4116 ret = -EBUSY;
4117 goto out;
4118 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004119 ret = security_binder_set_context_mgr(proc->tsk);
4120 if (ret < 0)
4121 goto out;
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004122 if (uid_valid(context->binder_context_mgr_uid)) {
4123 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004124 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4125 from_kuid(&init_user_ns, curr_euid),
4126 from_kuid(&init_user_ns,
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004127 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004128 ret = -EPERM;
4129 goto out;
4130 }
4131 } else {
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004132 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004133 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004134 new_node = binder_new_node(proc, NULL);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004135 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004136 ret = -ENOMEM;
4137 goto out;
4138 }
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004139 binder_node_lock(new_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004140 new_node->local_weak_refs++;
4141 new_node->local_strong_refs++;
4142 new_node->has_strong_ref = 1;
4143 new_node->has_weak_ref = 1;
4144 context->binder_context_mgr_node = new_node;
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004145 binder_node_unlock(new_node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004146 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004147out:
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004148 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004149 return ret;
4150}
4151
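/*
 * Illustrative sketch (assumption, not part of the driver): how a
 * service manager claims the context, matching the checks above; a
 * second caller gets -EBUSY, a caller with the wrong euid -EPERM.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	static int become_context_manager(int fd)
 *	{
 *		return ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
 *	}
 */
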
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004152static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4153{
4154 int ret;
4155 struct binder_proc *proc = filp->private_data;
4156 struct binder_thread *thread;
4157 unsigned int size = _IOC_SIZE(cmd);
4158 void __user *ubuf = (void __user *)arg;
4159
Tair Rzayev78260ac2014-06-03 22:27:21 +03004160 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4161 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004162
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004163 trace_binder_ioctl(cmd, arg);
4164
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004165 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4166 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004167 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004168
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004169 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004170 thread = binder_get_thread(proc);
4171 if (thread == NULL) {
4172 ret = -ENOMEM;
4173 goto err;
4174 }
4175
4176 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004177 case BINDER_WRITE_READ:
4178 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4179 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004180 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004181 break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004182 case BINDER_SET_MAX_THREADS:
4183 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
4184 ret = -EINVAL;
4185 goto err;
4186 }
4187 break;
4188 case BINDER_SET_CONTEXT_MGR:
Tair Rzayev78260ac2014-06-03 22:27:21 +03004189 ret = binder_ioctl_set_ctx_mgr(filp);
4190 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004191 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004192 break;
4193 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304194 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004195 proc->pid, thread->pid);
Todd Kjos2f993e22017-05-12 14:42:55 -07004196 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004197 thread = NULL;
4198 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004199 case BINDER_VERSION: {
4200 struct binder_version __user *ver = ubuf;
4201
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004202 if (size != sizeof(struct binder_version)) {
4203 ret = -EINVAL;
4204 goto err;
4205 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02004206 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4207 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004208 ret = -EINVAL;
4209 goto err;
4210 }
4211 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02004212 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004213 default:
4214 ret = -EINVAL;
4215 goto err;
4216 }
4217 ret = 0;
4218err:
4219 if (thread)
Todd Kjos6798e6d2017-01-06 14:19:25 -08004220 thread->looper_need_return = false;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004221 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004222 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4223 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05304224 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004225err_unlocked:
4226 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004227 return ret;
4228}
4229
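/*
 * Illustrative sketch (assumption, not part of the driver): the
 * usual ioctl handshake a client performs after opening the device,
 * exercising the BINDER_VERSION and BINDER_SET_MAX_THREADS cases
 * above.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/android/binder.h>
 *
 *	static int binder_handshake(int fd, uint32_t max_threads)
 *	{
 *		struct binder_version vers;
 *
 *		if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *		    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *			return -1;
 *		return ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 *	}
 */
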
4230static void binder_vma_open(struct vm_area_struct *vma)
4231{
4232 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004233
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004234 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304235 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004236 proc->pid, vma->vm_start, vma->vm_end,
4237 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4238 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004239}
4240
4241static void binder_vma_close(struct vm_area_struct *vma)
4242{
4243 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004244
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004245 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304246 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004247 proc->pid, vma->vm_start, vma->vm_end,
4248 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4249 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjosd325d372016-10-10 10:40:53 -07004250 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004251 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4252}
4253
Vinayak Menonddac7d52014-06-02 18:17:59 +05304254static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4255{
4256 return VM_FAULT_SIGBUS;
4257}
4258
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004259static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004260 .open = binder_vma_open,
4261 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05304262 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004263};
4264
Todd Kjosd325d372016-10-10 10:40:53 -07004265static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4266{
4267 int ret;
4268 struct binder_proc *proc = filp->private_data;
4269 const char *failure_string;
4270
4271 if (proc->tsk != current->group_leader)
4272 return -EINVAL;
4273
4274 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4275 vma->vm_end = vma->vm_start + SZ_4M;
4276
4277 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4278 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4279 __func__, proc->pid, vma->vm_start, vma->vm_end,
4280 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4281 (unsigned long)pgprot_val(vma->vm_page_prot));
4282
4283 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4284 ret = -EPERM;
4285 failure_string = "bad vm_flags";
4286 goto err_bad_arg;
4287 }
4288 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4289 vma->vm_ops = &binder_vm_ops;
4290 vma->vm_private_data = proc;
4291
4292 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4293 if (ret)
4294 return ret;
4295 proc->files = get_files_struct(current);
4296 return 0;
4297
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004298err_bad_arg:
Sherwin Soltani258767f2012-06-26 02:00:30 -04004299 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004300 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4301 return ret;
4302}
4303
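/*
 * Illustrative sketch (assumption, not part of the driver): opening
 * and mapping the device from user space. The mapping must be
 * read-only and private (writable mappings are rejected via
 * FORBIDDEN_MMAP_FLAGS and ~VM_MAYWRITE above), and any size beyond
 * 4MB is silently clamped.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	static int binder_open_mapped(size_t map_size, void **map)
 *	{
 *		int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *		if (fd < 0)
 *			return -1;
 *		*map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *		if (*map == MAP_FAILED) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */
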
4304static int binder_open(struct inode *nodp, struct file *filp)
4305{
4306 struct binder_proc *proc;
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004307 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004308
4309 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4310 current->group_leader->pid, current->pid);
4311
4312 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4313 if (proc == NULL)
4314 return -ENOMEM;
Todd Kjosfc7a7e22017-05-29 16:44:24 -07004315 spin_lock_init(&proc->inner_lock);
4316 spin_lock_init(&proc->outer_lock);
Martijn Coenen872c26e2017-03-07 15:51:18 +01004317 get_task_struct(current->group_leader);
4318 proc->tsk = current->group_leader;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004319 INIT_LIST_HEAD(&proc->todo);
4320 init_waitqueue_head(&proc->wait);
4321 proc->default_priority = task_nice(current);
Martijn Coenen6b7c7122016-09-30 16:08:09 +02004322 binder_dev = container_of(filp->private_data, struct binder_device,
4323 miscdev);
4324 proc->context = &binder_dev->context;
Todd Kjosd325d372016-10-10 10:40:53 -07004325 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004326
4327 binder_lock(__func__);
4328
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004329 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004330 proc->pid = current->group_leader->pid;
4331 INIT_LIST_HEAD(&proc->delivered_death);
4332 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004333
4334 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004335
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004336 mutex_lock(&binder_procs_lock);
4337 hlist_add_head(&proc->proc_node, &binder_procs);
4338 mutex_unlock(&binder_procs_lock);
4339
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004340 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004341 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09004342
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004343 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004344 /*
4345 * proc debug entries are shared between contexts, so
4346 * this will fail if the process tries to open the driver
4347	 * again with a different context. The printing code
4348	 * prints all contexts for a given PID anyway, so this
4349 * is not a problem.
4350 */
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004351 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004352 binder_debugfs_dir_entry_proc,
4353 (void *)(unsigned long)proc->pid,
4354 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004355 }
4356
4357 return 0;
4358}
4359
4360static int binder_flush(struct file *filp, fl_owner_t id)
4361{
4362 struct binder_proc *proc = filp->private_data;
4363
4364 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4365
4366 return 0;
4367}
4368
4369static void binder_deferred_flush(struct binder_proc *proc)
4370{
4371 struct rb_node *n;
4372 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09004373
Todd Kjosb4827902017-05-25 15:52:17 -07004374 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004375 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4376 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09004377
Todd Kjos6798e6d2017-01-06 14:19:25 -08004378 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004379 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4380 wake_up_interruptible(&thread->wait);
4381 wake_count++;
4382 }
4383 }
Todd Kjosb4827902017-05-25 15:52:17 -07004384 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004385 wake_up_interruptible_all(&proc->wait);
4386
4387 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4388 "binder_flush: %d woke %d threads\n", proc->pid,
4389 wake_count);
4390}
4391
4392static int binder_release(struct inode *nodp, struct file *filp)
4393{
4394 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09004395
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004396 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004397 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4398
4399 return 0;
4400}
4401
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004402static int binder_node_release(struct binder_node *node, int refs)
4403{
4404 struct binder_ref *ref;
4405 int death = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004406 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004407
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004408 binder_release_work(proc, &node->async_todo);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004409
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004410 binder_node_lock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004411 binder_inner_proc_lock(proc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004412 binder_dequeue_work_ilocked(&node->work);
Todd Kjosf22abc72017-05-09 11:08:05 -07004413 /*
4414	 * The caller must have taken a temporary ref on the node.
4415 */
4416 BUG_ON(!node->tmp_refs);
4417 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjose7f23ed2017-03-21 13:06:01 -07004418 binder_inner_proc_unlock(proc);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004419 binder_node_unlock(node);
Todd Kjose7f23ed2017-03-21 13:06:01 -07004420 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004421
4422 return refs;
4423 }
4424
4425 node->proc = NULL;
4426 node->local_strong_refs = 0;
4427 node->local_weak_refs = 0;
Todd Kjose7f23ed2017-03-21 13:06:01 -07004428 binder_inner_proc_unlock(proc);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004429
4430 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004431 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004432 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004433
4434 hlist_for_each_entry(ref, &node->refs, node_entry) {
4435 refs++;
4436
4437 if (!ref->death)
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08004438 continue;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004439
4440 death++;
4441
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004442 binder_inner_proc_lock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004443 if (list_empty(&ref->death->work.entry)) {
4444 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004445 binder_enqueue_work_ilocked(&ref->death->work,
4446 &ref->proc->todo);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004447 wake_up_interruptible(&ref->proc->wait);
4448 } else
4449 BUG();
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004450 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004451 }
4452
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004453 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4454 "node %d now dead, refs %d, death %d\n",
4455 node->debug_id, refs, death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004456 binder_node_unlock(node);
Todd Kjosf22abc72017-05-09 11:08:05 -07004457 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004458
4459 return refs;
4460}
4461
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004462static void binder_deferred_release(struct binder_proc *proc)
4463{
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004464 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004465 struct rb_node *n;
Todd Kjosd325d372016-10-10 10:40:53 -07004466 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004467
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004468 BUG_ON(proc->files);
4469
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004470 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004471 hlist_del(&proc->proc_node);
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004472 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004473
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004474 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004475 if (context->binder_context_mgr_node &&
4476 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004477 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004478 "%s: %d context_mgr_node gone\n",
4479 __func__, proc->pid);
Martijn Coenen0b3311e2016-09-30 15:51:48 +02004480 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004481 }
Todd Kjos8d9f6f32016-10-17 12:33:15 -07004482 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjosb4827902017-05-25 15:52:17 -07004483 binder_inner_proc_lock(proc);
Todd Kjos2f993e22017-05-12 14:42:55 -07004484 /*
4485 * Make sure proc stays alive after we
4486 * remove all the threads
4487 */
4488 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004489
Todd Kjos2f993e22017-05-12 14:42:55 -07004490 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004491 threads = 0;
4492 active_transactions = 0;
4493 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004494 struct binder_thread *thread;
4495
4496 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjosb4827902017-05-25 15:52:17 -07004497 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004498 threads++;
Todd Kjos2f993e22017-05-12 14:42:55 -07004499 active_transactions += binder_thread_release(proc, thread);
Todd Kjosb4827902017-05-25 15:52:17 -07004500 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004501 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004502
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004503 nodes = 0;
4504 incoming_refs = 0;
4505 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004506 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004507
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004508 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004509 nodes++;
Todd Kjosf22abc72017-05-09 11:08:05 -07004510 /*
4511 * take a temporary ref on the node before
4512 * calling binder_node_release() which will either
4513 * kfree() the node or call binder_put_node()
4514 */
Todd Kjos425d23f2017-06-12 12:07:26 -07004515 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004516 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjos425d23f2017-06-12 12:07:26 -07004517 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01004518 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjos425d23f2017-06-12 12:07:26 -07004519 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004520 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004521 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004522
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004523 outgoing_refs = 0;
4524 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004525 struct binder_ref *ref;
4526
4527 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004528 outgoing_refs++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07004529 binder_cleanup_ref(ref);
4530 binder_free_ref(ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004531 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01004532
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004533 binder_release_work(proc, &proc->todo);
4534 binder_release_work(proc, &proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004535
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004536 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjosd325d372016-10-10 10:40:53 -07004537 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01004538 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjosd325d372016-10-10 10:40:53 -07004539 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004540
Todd Kjos2f993e22017-05-12 14:42:55 -07004541 binder_proc_dec_tmpref(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004542}
4543
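/*
 * The deferred-work machinery below lets binder_flush() and
 * binder_release() return quickly: they only queue a request, and
 * the heavy teardown (dropping files, threads, nodes and refs) runs
 * later from the shared workqueue, one proc at a time.
 */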
4544static void binder_deferred_func(struct work_struct *work)
4545{
4546 struct binder_proc *proc;
4547 struct files_struct *files;
4548
4549 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09004550
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004551 do {
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004552 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004553 mutex_lock(&binder_deferred_lock);
4554 if (!hlist_empty(&binder_deferred_list)) {
4555 proc = hlist_entry(binder_deferred_list.first,
4556 struct binder_proc, deferred_work_node);
4557 hlist_del_init(&proc->deferred_work_node);
4558 defer = proc->deferred_work;
4559 proc->deferred_work = 0;
4560 } else {
4561 proc = NULL;
4562 defer = 0;
4563 }
4564 mutex_unlock(&binder_deferred_lock);
4565
4566 files = NULL;
4567 if (defer & BINDER_DEFERRED_PUT_FILES) {
4568 files = proc->files;
4569 if (files)
4570 proc->files = NULL;
4571 }
4572
4573 if (defer & BINDER_DEFERRED_FLUSH)
4574 binder_deferred_flush(proc);
4575
4576 if (defer & BINDER_DEFERRED_RELEASE)
4577 binder_deferred_release(proc); /* frees proc */
4578
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004579 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004580 if (files)
4581 put_files_struct(files);
4582 } while (proc);
4583}
4584static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4585
4586static void
4587binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4588{
4589 mutex_lock(&binder_deferred_lock);
4590 proc->deferred_work |= defer;
4591 if (hlist_unhashed(&proc->deferred_work_node)) {
4592 hlist_add_head(&proc->deferred_work_node,
4593 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05304594 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004595 }
4596 mutex_unlock(&binder_deferred_lock);
4597}
4598
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004599static void print_binder_transaction(struct seq_file *m, const char *prefix,
4600 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004601{
Todd Kjos2f993e22017-05-12 14:42:55 -07004602 spin_lock(&t->lock);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004603 seq_printf(m,
4604 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4605 prefix, t->debug_id, t,
4606 t->from ? t->from->proc->pid : 0,
4607 t->from ? t->from->pid : 0,
4608 t->to_proc ? t->to_proc->pid : 0,
4609 t->to_thread ? t->to_thread->pid : 0,
4610 t->code, t->flags, t->priority, t->need_reply);
Todd Kjos2f993e22017-05-12 14:42:55 -07004611 spin_unlock(&t->lock);
4612
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004613 if (t->buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004614 seq_puts(m, " buffer free\n");
4615 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004616 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004617 if (t->buffer->target_node)
4618 seq_printf(m, " node %d",
4619 t->buffer->target_node->debug_id);
4620 seq_printf(m, " size %zd:%zd data %p\n",
4621 t->buffer->data_size, t->buffer->offsets_size,
4622 t->buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004623}
4624
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004625static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
4626 const char *transaction_prefix,
4627 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004628{
4629 struct binder_node *node;
4630 struct binder_transaction *t;
4631
4632 switch (w->type) {
4633 case BINDER_WORK_TRANSACTION:
4634 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004635 print_binder_transaction(m, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004636 break;
Todd Kjos858b8da2017-04-21 17:35:12 -07004637 case BINDER_WORK_RETURN_ERROR: {
4638 struct binder_error *e = container_of(
4639 w, struct binder_error, work);
4640
4641 seq_printf(m, "%stransaction error: %u\n",
4642 prefix, e->cmd);
4643 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004644 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004645 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004646 break;
4647 case BINDER_WORK_NODE:
4648 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08004649 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
4650 prefix, node->debug_id,
4651 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004652 break;
4653 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004654 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004655 break;
4656 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004657 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004658 break;
4659 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004660 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004661 break;
4662 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004663 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004664 break;
4665 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004666}
4667
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004668static void print_binder_thread_ilocked(struct seq_file *m,
4669 struct binder_thread *thread,
4670 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004671{
4672 struct binder_transaction *t;
4673 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004674 size_t start_pos = m->count;
4675 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004676
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004677 WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
Todd Kjos2f993e22017-05-12 14:42:55 -07004678 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
Todd Kjos6798e6d2017-01-06 14:19:25 -08004679 thread->pid, thread->looper,
Todd Kjos2f993e22017-05-12 14:42:55 -07004680 thread->looper_need_return,
4681 atomic_read(&thread->tmp_ref));
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004682 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004683 t = thread->transaction_stack;
4684 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004685 if (t->from == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004686 print_binder_transaction(m,
4687 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004688 t = t->from_parent;
4689 } else if (t->to_thread == thread) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004690 print_binder_transaction(m,
4691 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004692 t = t->to_parent;
4693 } else {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004694 print_binder_transaction(m, " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004695 t = NULL;
4696 }
4697 }
4698 list_for_each_entry(w, &thread->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004699 print_binder_work_ilocked(m, " ",
4700 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004701 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004702 if (!print_always && m->count == header_pos)
4703 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004704}
4705
Todd Kjos425d23f2017-06-12 12:07:26 -07004706static void print_binder_node_nilocked(struct seq_file *m,
4707 struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004708{
4709 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004710 struct binder_work *w;
4711 int count;
4712
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004713 WARN_ON(!spin_is_locked(&node->lock));
Todd Kjos425d23f2017-06-12 12:07:26 -07004714 if (node->proc)
4715 WARN_ON(!spin_is_locked(&node->proc->inner_lock));
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004716
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004717 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08004718 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004719 count++;
4720
Todd Kjosf22abc72017-05-09 11:08:05 -07004721 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
Arve Hjønnevågda498892014-02-21 14:40:26 -08004722 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004723 node->has_strong_ref, node->has_weak_ref,
4724 node->local_strong_refs, node->local_weak_refs,
Todd Kjosf22abc72017-05-09 11:08:05 -07004725 node->internal_strong_refs, count, node->tmp_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004726 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004727 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08004728 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004729 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004730 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004731 seq_puts(m, "\n");
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004732 if (node->proc) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004733 list_for_each_entry(w, &node->async_todo, entry)
4734 print_binder_work_ilocked(m, " ",
4735 " pending async transaction", w);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004736 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004737}
4738
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004739static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004740{
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004741 binder_node_lock(ref->node);
Todd Kjosb0117bb2017-05-08 09:16:27 -07004742 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4743 ref->data.debug_id, ref->data.desc,
4744 ref->node->proc ? "" : "dead ",
4745 ref->node->debug_id, ref->data.strong,
4746 ref->data.weak, ref->death);
Todd Kjoscbcbbd62017-06-08 13:45:59 -07004747 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004748}
4749
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004750static void print_binder_proc(struct seq_file *m,
4751 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004752{
4753 struct binder_work *w;
4754 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004755 size_t start_pos = m->count;
4756 size_t header_pos;
Todd Kjos425d23f2017-06-12 12:07:26 -07004757 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004758
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004759 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004760 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004761 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004762
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004763 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004764 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004765 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004766 rb_node), print_all);
Todd Kjos425d23f2017-06-12 12:07:26 -07004767
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004768 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004769 struct binder_node *node = rb_entry(n, struct binder_node,
4770 rb_node);
Todd Kjos425d23f2017-06-12 12:07:26 -07004771 /*
4772 * take a temporary reference on the node so it
4773 * survives and isn't removed from the tree
4774 * while we print it.
4775 */
4776 binder_inc_node_tmpref_ilocked(node);
4777 /* Need to drop inner lock to take node lock */
4778 binder_inner_proc_unlock(proc);
4779 if (last_node)
4780 binder_put_node(last_node);
4781 binder_node_inner_lock(node);
4782 print_binder_node_nilocked(m, node);
4783 binder_node_inner_unlock(node);
4784 last_node = node;
4785 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004786 }
Todd Kjos425d23f2017-06-12 12:07:26 -07004787 binder_inner_proc_unlock(proc);
4788 if (last_node)
4789 binder_put_node(last_node);
4790
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004791 if (print_all) {
4792 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004793 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004794 n = rb_next(n))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004795 print_binder_ref(m, rb_entry(n, struct binder_ref,
4796 rb_node_desc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004797 }
Todd Kjosd325d372016-10-10 10:40:53 -07004798 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004799 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004800 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004801 print_binder_work_ilocked(m, " ", " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004802 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004803 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004804 break;
4805 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004806 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004807 if (!print_all && m->count == header_pos)
4808 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004809}
4810
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004811static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004812 "BR_ERROR",
4813 "BR_OK",
4814 "BR_TRANSACTION",
4815 "BR_REPLY",
4816 "BR_ACQUIRE_RESULT",
4817 "BR_DEAD_REPLY",
4818 "BR_TRANSACTION_COMPLETE",
4819 "BR_INCREFS",
4820 "BR_ACQUIRE",
4821 "BR_RELEASE",
4822 "BR_DECREFS",
4823 "BR_ATTEMPT_ACQUIRE",
4824 "BR_NOOP",
4825 "BR_SPAWN_LOOPER",
4826 "BR_FINISHED",
4827 "BR_DEAD_BINDER",
4828 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4829 "BR_FAILED_REPLY"
4830};
4831
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004832static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004833 "BC_TRANSACTION",
4834 "BC_REPLY",
4835 "BC_ACQUIRE_RESULT",
4836 "BC_FREE_BUFFER",
4837 "BC_INCREFS",
4838 "BC_ACQUIRE",
4839 "BC_RELEASE",
4840 "BC_DECREFS",
4841 "BC_INCREFS_DONE",
4842 "BC_ACQUIRE_DONE",
4843 "BC_ATTEMPT_ACQUIRE",
4844 "BC_REGISTER_LOOPER",
4845 "BC_ENTER_LOOPER",
4846 "BC_EXIT_LOOPER",
4847 "BC_REQUEST_DEATH_NOTIFICATION",
4848 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen5a6da532016-09-30 14:10:07 +02004849 "BC_DEAD_BINDER_DONE",
4850 "BC_TRANSACTION_SG",
4851 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004852};
4853
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10004854static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004855 "proc",
4856 "thread",
4857 "node",
4858 "ref",
4859 "death",
4860 "transaction",
4861 "transaction_complete"
4862};
4863
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004864static void print_binder_stats(struct seq_file *m, const char *prefix,
4865 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004866{
4867 int i;
4868
4869 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004870 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004871 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004872 int temp = atomic_read(&stats->bc[i]);
4873
4874 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004875 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004876 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004877 }
4878
4879 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004880 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004881 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004882 int temp = atomic_read(&stats->br[i]);
4883
4884 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004885 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004886 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004887 }
4888
4889 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004890 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004891 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004892 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004893 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004894 int created = atomic_read(&stats->obj_created[i]);
4895 int deleted = atomic_read(&stats->obj_deleted[i]);
4896
4897 if (created || deleted)
4898 seq_printf(m, "%s%s: active %d total %d\n",
4899 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004900 binder_objstat_strings[i],
Badhri Jagan Sridharan5551ff22016-10-13 16:36:15 -07004901 created - deleted,
4902 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004903 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004904}
4905
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004906static void print_binder_proc_stats(struct seq_file *m,
4907 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004908{
4909 struct binder_work *w;
4910 struct rb_node *n;
4911 int count, strong, weak;
Todd Kjosb4827902017-05-25 15:52:17 -07004912 size_t free_async_space =
4913 binder_alloc_get_free_async_space(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004914
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004915 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen63b9f3b2016-10-17 15:17:31 +02004916 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004917 count = 0;
Todd Kjosb4827902017-05-25 15:52:17 -07004918 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004919 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4920 count++;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004921 seq_printf(m, " threads: %d\n", count);
4922 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004923 " ready threads %d\n"
4924 " free async space %zd\n", proc->requested_threads,
4925 proc->requested_threads_started, proc->max_threads,
Todd Kjosd325d372016-10-10 10:40:53 -07004926 proc->ready_threads,
Todd Kjosb4827902017-05-25 15:52:17 -07004927 free_async_space);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004928 count = 0;
4929 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
4930 count++;
Todd Kjos425d23f2017-06-12 12:07:26 -07004931 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004932 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004933 count = 0;
4934 strong = 0;
4935 weak = 0;
4936 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
4937 struct binder_ref *ref = rb_entry(n, struct binder_ref,
4938 rb_node_desc);
4939 count++;
Todd Kjosb0117bb2017-05-08 09:16:27 -07004940 strong += ref->data.strong;
4941 weak += ref->data.weak;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004942 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004943 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004944
Todd Kjosd325d372016-10-10 10:40:53 -07004945 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004946 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004947
4948 count = 0;
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004949 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004950 list_for_each_entry(w, &proc->todo, entry) {
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004951 if (w->type == BINDER_WORK_TRANSACTION)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004952 count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004953 }
Todd Kjos1c89e6b2016-10-20 10:33:00 -07004954 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004955 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004956
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07004957 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004958}
4959
4960
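/*
 * The seq_file handlers below back the binder debugfs entries; with
 * debugfs mounted in the usual place they are readable as, e.g.,
 * /sys/kernel/debug/binder/state and /sys/kernel/debug/binder/stats
 * (the mount point is a convention, not something this code
 * enforces).
 */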
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

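/*
 * Dump the global binder_stats counters (BC_* commands issued, BR_*
 * returns delivered, object creation/deletion counts) followed by a
 * per-process summary for every entry on binder_procs.
 */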
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

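/*
 * A narrower dump than "state": print_binder_proc() is called with
 * print_all == 0, which skips idle threads and most per-node/ref
 * detail so that only in-flight transaction state is shown.
 */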
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);
	binder_unlock(__func__);
	return 0;
}

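/*
 * Show one process, keyed by the pid stashed in m->private when its
 * per-pid debugfs file was created. Note the loop has no early break:
 * a single pid can own several binder_proc instances (one per open of
 * a binder device), and all of them are printed.
 */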
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	binder_lock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}

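/*
 * Entries are printed without holding any lock, so a writer may be
 * overwriting this slot while we format it. debug_id_done is sampled
 * before and after the body is printed, with read barriers pairing
 * with a write barrier on the logging side; if the entry was unfinished
 * (debug_id_done == 0) or the two samples differ, the line is tagged
 * " (incomplete)" rather than trusted.
 */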
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

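/*
 * Render a snapshot of a transaction log ring, oldest entry first.
 * The cursor only ever grows, so the math below derives both how many
 * entries are live and where the oldest one sits. Worked example
 * (illustrative, assuming a 32-entry ring): with cur == 40 the ring
 * has wrapped, so count is clamped to 32 and printing starts at slot
 * 41 % 32 == 9, the oldest surviving entry, wrapping around to end at
 * slot 8, the newest.
 */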
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

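/*
 * One handler serves both native and compat ioctls: the binder uapi
 * uses fixed-size types throughout (binder_size_t, binder_uintptr_t),
 * so 32-bit and 64-bit userspace see the same structure layout and no
 * translation layer is needed.
 */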
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

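/*
 * BINDER_DEBUG_ENTRY(name) is defined earlier in this file (not in
 * this hunk); it stamps out the seq_file boilerplate for one debugfs
 * entry, roughly along these lines:
 *
 *	static int binder_state_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_state_show, inode->i_private);
 *	}
 *	...plus a matching binder_state_fops.
 *
 * binder_init() below hands these *_fops to debugfs_create_file().
 */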
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

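/*
 * Register a single binder device node as a misc character device
 * with a dynamic minor. Each device gets its own binder_context, so
 * names such as "binder", "hwbinder" and "vndbinder" keep separate
 * context managers while sharing this driver. The device is linked
 * into binder_devices so a later init failure can unwind it.
 */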
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

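/*
 * Driver initialization. The log cursors are preset to ~0U so the
 * writer's first atomic_inc_return() wraps to slot 0 (the increment
 * happens on the logging path, not shown in this hunk). The debugfs
 * tree is optional: if it can't be created the driver still registers
 * its devices, there is just nothing to read under
 * /sys/kernel/debug/binder/. Device registration itself is driven by
 * the comma-separated binder_devices_param module parameter.
 */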
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

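/*
 * Defining CREATE_TRACE_POINTS before re-including binder_trace.h
 * turns the TRACE_EVENT() declarations in that header into the actual
 * tracepoint definitions; the include at the top of the file pulled
 * in only the declarations.
 */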
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");