/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
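
/*
 * Illustrative sketch (editor's note, not driver code): with the
 * ordering above, code that needs a ref, a node field, and a todo
 * list in one critical section nests the locks outer -> node -> inner
 * using the helpers defined later in this file:
 *
 *      binder_proc_lock(proc);         // 1) proc->outer_lock
 *      binder_node_lock(node);         // 2) node->lock
 *      binder_inner_proc_lock(proc);   // 3) proc->inner_lock
 *      ...
 *      binder_inner_proc_unlock(proc);
 *      binder_node_unlock(node);
 *      binder_proc_unlock(proc);
 *
 * Release in reverse order; never take outer_lock or node->lock while
 * already holding inner_lock.
 */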

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/sched/types.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
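
/*
 * Editor's sketch: BINDER_DEBUG_ENTRY(proc) above expands to an open
 * handler and a file_operations instance wired to the seq_file
 * single_* helpers, roughly:
 *
 *      static int binder_proc_open(struct inode *inode, struct file *file)
 *      {
 *              return single_open(file, binder_proc_show, inode->i_private);
 *      }
 *
 *      static const struct file_operations binder_proc_fops = { ... };
 *
 * binder_proc_fops is then passed to debugfs_create_file() when the
 * per-process debugfs entries are created elsewhere in the driver.
 */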

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
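
/*
 * Editor's note: CONFIG_ANDROID_BINDER_DEVICES is a comma-separated
 * list of device names; an Android kernel config typically uses
 * something like (the exact value is board-specific):
 *
 *      CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
 *
 * which makes the driver register /dev/binder, /dev/hwbinder and
 * /dev/vndbinder, each with its own binder_context.
 */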

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, 0644);
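
/*
 * Editor's note: with the 0644 permissions above, these parameters are
 * visible under sysfs, so (assuming the module is named "binder") a
 * developer can toggle them at runtime, e.g.:
 *
 *      echo 1 > /sys/module/binder/parameters/stop_on_user_error
 *      echo 0x3f > /sys/module/binder/parameters/debug_mask
 *
 * Writing stop_on_user_error goes through
 * binder_set_stop_on_user_error(), which wakes waiters on
 * binder_user_error_wait when the new value is below 2.
 */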
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900164
165#define binder_debug(mask, x...) \
166 do { \
167 if (binder_debug_mask & mask) \
Sherry Yang128f3802018-08-07 12:57:13 -0700168 pr_info_ratelimited(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900169 } while (0)
170
171#define binder_user_error(x...) \
172 do { \
173 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
Sherry Yang128f3802018-08-07 12:57:13 -0700174 pr_info_ratelimited(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900175 if (binder_stop_on_user_error) \
176 binder_stop_on_user_error = 2; \
177 } while (0)
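
/*
 * Editor's sketch of how these macros are used throughout the file:
 *
 *      binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", proc->pid);
 *      binder_user_error("%d:%d bad handle %u\n",
 *                        proc->pid, thread->pid, handle);
 *
 * Output is rate-limited via pr_info_ratelimited() and prefixed with
 * KBUILD_MODNAME by the pr_fmt() definition at the top of the file;
 * binder_user_error() additionally arms the stop_on_user_error
 * debugging hook.
 */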

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
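
/*
 * Editor's sketch: the smp_wmb() above pairs with a read barrier on
 * the consumer side. A reader that wants a consistent snapshot of an
 * entry checks debug_id_done around the copy, roughly (following the
 * pattern used by the debugfs dump code later in the driver):
 *
 *      int debug_id = READ_ONCE(e->debug_id_done);
 *
 *      smp_rmb();
 *      // print the entry fields here; the entry is known complete
 *      // only if debug_id_done was nonzero and unchanged afterwards
 *      if (debug_id && debug_id == READ_ONCE(e->debug_id_done))
 *              ... entry was stable while we read it ...
 */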

struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 sched_policy:2;
                u8 inherit_rt:1;
                u8 accept_fds:1;
                u8 txn_security_ctx:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};
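
/*
 * Editor's sketch: the "(protected by @proc->inner_lock if @proc and
 * by @lock)" rule above is exactly what the binder_node_inner_lock()
 * helpers defined below implement, so a field such as has_strong_ref
 * is only touched like this:
 *
 *      binder_node_inner_lock(node);   // node->lock (+ inner if proc)
 *      node->has_strong_ref = 1;
 *      binder_node_inner_unlock(node);
 */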

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES    = 0x01,
        BINDER_DEFERRED_FLUSH        = 0x02,
        BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio:         [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
        unsigned int sched_policy;
        int prio;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct mutex files_lock;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        struct binder_priority default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
        struct task_struct *task;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        struct binder_priority priority;
        struct binder_priority saved_priority;
        bool set_priority_called;
        kuid_t sender_euid;
        binder_uintptr_t security_ctx;
        /**
         * @lock: protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
        union {
                struct binder_object_header hdr;
                struct flat_binder_object fbo;
                struct binder_fd_object fdo;
                struct binder_buffer_object bbo;
                struct binder_fd_array_object fdao;
        };
};
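
/*
 * Editor's sketch: code that copies an object out of a transaction
 * buffer reads it into a struct binder_object and then dispatches on
 * the generic header, roughly:
 *
 *      struct binder_object object;
 *      struct binder_object_header *hdr = &object.hdr;
 *
 *      // buffer/user copy into &object happens elsewhere
 *      switch (hdr->type) {
 *      case BINDER_TYPE_BINDER:
 *      case BINDER_TYPE_WEAK_BINDER:
 *              ... use object.fbo ...
 *              break;
 *      case BINDER_TYPE_FD:
 *              ... use object.fdo ...
 *              break;
 *      }
 */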

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:         binder_proc associated with list
 * @list:         list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        unsigned long rlim_cur;
        unsigned long irqs;
        int ret;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                ret = -ESRCH;
                goto err;
        }
        if (!lock_task_sighand(proc->tsk, &irqs)) {
                ret = -EMFILE;
                goto err;
        }
        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
        mutex_unlock(&proc->files_lock);
        return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        mutex_lock(&proc->files_lock);
        if (proc->files)
                __fd_install(proc->files, fd, file);
        mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                retval = -ESRCH;
                goto err;
        }
        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;
err:
        mutex_unlock(&proc->files_lock);
        return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake-up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
        return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
        return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
        return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
        if (is_fair_policy(policy))
                return PRIO_TO_NICE(kernel_priority);
        else
                return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
        if (is_fair_policy(policy))
                return NICE_TO_PRIO(user_priority);
        else
                return MAX_USER_RT_PRIO - 1 - user_priority;
}
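
/*
 * Editor's worked example of the mapping above: for fair policies,
 * NICE_TO_PRIO()/PRIO_TO_NICE() offset by DEFAULT_PRIO (120), so
 * nice 0 <-> kernel prio 120 and nice -20 <-> kernel prio 100. For RT
 * policies the scale is inverted around MAX_USER_RT_PRIO (100):
 * userspace rtprio 1 <-> kernel prio 98, rtprio 99 <-> kernel prio 0.
 * This matches the [100..139] / [0..99] ranges documented on
 * struct binder_priority.
 */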

static void binder_do_set_priority(struct task_struct *task,
                                   struct binder_priority desired,
                                   bool verify)
{
        int priority; /* user-space prio value */
        bool has_cap_nice;
        unsigned int policy = desired.sched_policy;

        if (task->policy == policy && task->normal_prio == desired.prio)
                return;

        has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

        priority = to_userspace_prio(policy, desired.prio);

        if (verify && is_rt_policy(policy) && !has_cap_nice) {
                long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

                if (max_rtprio == 0) {
                        policy = SCHED_NORMAL;
                        priority = MIN_NICE;
                } else if (priority > max_rtprio) {
                        priority = max_rtprio;
                }
        }

        if (verify && is_fair_policy(policy) && !has_cap_nice) {
                long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

                if (min_nice > MAX_NICE) {
                        binder_user_error("%d RLIMIT_NICE not set\n",
                                          task->pid);
                        return;
                } else if (priority < min_nice) {
                        priority = min_nice;
                }
        }

        if (policy != desired.sched_policy ||
            to_kernel_prio(policy, priority) != desired.prio)
                binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                             "%d: priority %d not allowed, using %d instead\n",
                             task->pid, desired.prio,
                             to_kernel_prio(policy, priority));

        trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
                                  to_kernel_prio(policy, priority),
                                  desired.prio);

        /* Set the actual priority */
        if (task->policy != policy || is_rt_policy(policy)) {
                struct sched_param params;

                params.sched_priority = is_rt_policy(policy) ? priority : 0;

                sched_setscheduler_nocheck(task,
                                           policy | SCHED_RESET_ON_FORK,
                                           &params);
        }
        if (is_fair_policy(policy))
                set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
                                struct binder_priority desired)
{
        binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
                                    struct binder_priority desired)
{
        binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
                                        struct binder_transaction *t,
                                        struct binder_priority node_prio,
                                        bool inherit_rt)
{
        struct binder_priority desired_prio = t->priority;

        if (t->set_priority_called)
                return;

        t->set_priority_called = true;
        t->saved_priority.sched_policy = task->policy;
        t->saved_priority.prio = task->normal_prio;

        if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
                desired_prio.prio = NICE_TO_PRIO(0);
                desired_prio.sched_policy = SCHED_NORMAL;
        }

        if (node_prio.prio < t->priority.prio ||
            (node_prio.prio == t->priority.prio &&
             node_prio.sched_policy == SCHED_FIFO)) {
                /*
                 * In case the minimum priority on the node is
                 * higher (lower value), use that priority. If
                 * the priority is the same, but the node uses
                 * SCHED_FIFO, prefer SCHED_FIFO, since it can
                 * run unbounded, unlike SCHED_RR.
                 */
                desired_prio = node_prio;
        }

        binder_set_priority(task, desired_prio);
}
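
/*
 * Editor's worked example for binder_transaction_priority(): if a
 * caller runs SCHED_NORMAL nice 10 (kernel prio 130) and the target
 * node declares a min_priority of nice 0 (kernel prio 120), then
 * node_prio.prio (120) < t->priority.prio (130), so the target thread
 * runs at the node's floor of nice 0 for the duration of the
 * transaction; the original policy/priority saved in saved_priority
 * is put back via binder_restore_priority() elsewhere in the driver
 * once the transaction completes. If inherit_rt is false, an RT
 * caller's transaction is demoted to SCHED_NORMAL nice 0 rather than
 * propagating the RT policy into the target process.
 */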

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}
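
/*
 * Editor's sketch of the lookup/temporary-reference pattern: every
 * successful binder_get_node() must be balanced with a
 * binder_put_node() (defined later in the file) once the caller is
 * done with the node, e.g.:
 *
 *      struct binder_node *node = binder_get_node(proc, ptr);
 *
 *      if (node) {
 *              ... use node; tmp_refs keeps it alive ...
 *              binder_put_node(node);
 *      }
 */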
1340
1341static struct binder_node *binder_init_node_ilocked(
1342 struct binder_proc *proc,
1343 struct binder_node *new_node,
1344 struct flat_binder_object *fp)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001345{
1346 struct rb_node **p = &proc->nodes.rb_node;
1347 struct rb_node *parent = NULL;
1348 struct binder_node *node;
Todd Kjos673068e2017-06-29 12:02:03 -07001349 binder_uintptr_t ptr = fp ? fp->binder : 0;
1350 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1351 __u32 flags = fp ? fp->flags : 0;
Martijn Coenence388e02017-06-06 17:04:42 -07001352 s8 priority;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001353
Martijn Coenen858b2712017-08-31 10:04:26 +02001354 assert_spin_locked(&proc->inner_lock);
1355
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001356 while (*p) {
Todd Kjosda0fa9e2017-06-29 12:02:04 -07001357
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001358 parent = *p;
1359 node = rb_entry(parent, struct binder_node, rb_node);
1360
1361 if (ptr < node->ptr)
1362 p = &(*p)->rb_left;
1363 else if (ptr > node->ptr)
1364 p = &(*p)->rb_right;
Todd Kjosda0fa9e2017-06-29 12:02:04 -07001365 else {
1366 /*
1367 * A matching node is already in
1368 * the rb tree. Abandon the init
1369 * and return it.
1370 */
1371 binder_inc_node_tmpref_ilocked(node);
1372 return node;
1373 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001374 }
Todd Kjosda0fa9e2017-06-29 12:02:04 -07001375 node = new_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001376 binder_stats_created(BINDER_STAT_NODE);
Todd Kjosadc18842017-06-29 12:01:59 -07001377 node->tmp_refs++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001378 rb_link_node(&node->rb_node, parent, p);
1379 rb_insert_color(&node->rb_node, &proc->nodes);
Todd Kjos656a8002017-06-29 12:01:45 -07001380 node->debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001381 node->proc = proc;
1382 node->ptr = ptr;
1383 node->cookie = cookie;
1384 node->work.type = BINDER_WORK_NODE;
Martijn Coenence388e02017-06-06 17:04:42 -07001385 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1386 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
1387 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1388 node->min_priority = to_kernel_prio(node->sched_policy, priority);
Todd Kjos673068e2017-06-29 12:02:03 -07001389 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
Martijn Coenence388e02017-06-06 17:04:42 -07001390 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
Todd Kjos00bac142019-01-14 09:10:21 -08001391 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
Todd Kjos9630fe82017-06-29 12:02:00 -07001392 spin_lock_init(&node->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001393 INIT_LIST_HEAD(&node->work.entry);
1394 INIT_LIST_HEAD(&node->async_todo);
1395 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001396 "%d:%d node %d u%016llx c%016llx created\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001397 proc->pid, current->pid, node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001398 (u64)node->ptr, (u64)node->cookie);
Todd Kjosda0fa9e2017-06-29 12:02:04 -07001399
1400 return node;
1401}
1402
1403static struct binder_node *binder_new_node(struct binder_proc *proc,
1404 struct flat_binder_object *fp)
1405{
1406 struct binder_node *node;
1407 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1408
1409 if (!new_node)
1410 return NULL;
1411 binder_inner_proc_lock(proc);
1412 node = binder_init_node_ilocked(proc, new_node, fp);
1413 binder_inner_proc_unlock(proc);
1414 if (node != new_node)
1415 /*
1416 * The node was already added by another thread
1417 */
1418 kfree(new_node);
1419
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001420 return node;
1421}
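
/*
 * Editorial note on the pattern above (not driver code): kzalloc() with
 * GFP_KERNEL may sleep, so the node is allocated before the inner
 * spinlock is taken. If binder_init_node_ilocked() finds that another
 * thread already inserted a node for the same ptr, it returns the
 * existing node and the speculative allocation is discarded:
 *
 *	if (node != new_node)
 *		kfree(new_node);	// lost the insertion race
 */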
1422
Todd Kjosed297212017-06-29 12:02:01 -07001423static void binder_free_node(struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001424{
Todd Kjosed297212017-06-29 12:02:01 -07001425 kfree(node);
1426 binder_stats_deleted(BINDER_STAT_NODE);
1427}
1428
Todd Kjos673068e2017-06-29 12:02:03 -07001429static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1430 int internal,
1431 struct list_head *target_list)
Todd Kjosed297212017-06-29 12:02:01 -07001432{
Todd Kjos673068e2017-06-29 12:02:03 -07001433 struct binder_proc *proc = node->proc;
1434
Martijn Coenen858b2712017-08-31 10:04:26 +02001435 assert_spin_locked(&node->lock);
Todd Kjos673068e2017-06-29 12:02:03 -07001436 if (proc)
Martijn Coenen858b2712017-08-31 10:04:26 +02001437 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001438 if (strong) {
1439 if (internal) {
1440 if (target_list == NULL &&
1441 node->internal_strong_refs == 0 &&
Martijn Coenen342e5c92017-02-03 14:40:46 -08001442 !(node->proc &&
1443 node == node->proc->context->binder_context_mgr_node &&
1444 node->has_strong_ref)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301445 pr_err("invalid inc strong node for %d\n",
1446 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001447 return -EINVAL;
1448 }
1449 node->internal_strong_refs++;
1450 } else
1451 node->local_strong_refs++;
1452 if (!node->has_strong_ref && target_list) {
Sherry Yangb0cb2d82018-08-13 17:28:53 -07001453 struct binder_thread *thread = container_of(target_list,
1454 struct binder_thread, todo);
Todd Kjos72196392017-06-29 12:02:02 -07001455 binder_dequeue_work_ilocked(&node->work);
Sherry Yangb0cb2d82018-08-13 17:28:53 -07001456 BUG_ON(&thread->todo != target_list);
1457 binder_enqueue_deferred_thread_work_ilocked(thread,
1458 &node->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001459 }
1460 } else {
1461 if (!internal)
1462 node->local_weak_refs++;
1463 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1464 if (target_list == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301465 pr_err("invalid inc weak node for %d\n",
1466 node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001467 return -EINVAL;
1468 }
Martijn Coenen148ade22017-11-15 09:21:35 +01001469 /*
1470 * No need to set process_todo here: this node work is always followed by other work that starts queue processing.
1471 */
Todd Kjos72196392017-06-29 12:02:02 -07001472 binder_enqueue_work_ilocked(&node->work, target_list);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001473 }
1474 }
1475 return 0;
1476}
1477
Todd Kjosed297212017-06-29 12:02:01 -07001478static int binder_inc_node(struct binder_node *node, int strong, int internal,
1479 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001480{
Todd Kjosed297212017-06-29 12:02:01 -07001481 int ret;
1482
Todd Kjos673068e2017-06-29 12:02:03 -07001483 binder_node_inner_lock(node);
1484 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1485 binder_node_inner_unlock(node);
Todd Kjosed297212017-06-29 12:02:01 -07001486
1487 return ret;
1488}
1489
Todd Kjos673068e2017-06-29 12:02:03 -07001490static bool binder_dec_node_nilocked(struct binder_node *node,
1491 int strong, int internal)
Todd Kjosed297212017-06-29 12:02:01 -07001492{
1493 struct binder_proc *proc = node->proc;
1494
Martijn Coenen858b2712017-08-31 10:04:26 +02001495 assert_spin_locked(&node->lock);
Todd Kjosed297212017-06-29 12:02:01 -07001496 if (proc)
Martijn Coenen858b2712017-08-31 10:04:26 +02001497 assert_spin_locked(&proc->inner_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001498 if (strong) {
1499 if (internal)
1500 node->internal_strong_refs--;
1501 else
1502 node->local_strong_refs--;
1503 if (node->local_strong_refs || node->internal_strong_refs)
Todd Kjosed297212017-06-29 12:02:01 -07001504 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001505 } else {
1506 if (!internal)
1507 node->local_weak_refs--;
Todd Kjosadc18842017-06-29 12:01:59 -07001508 if (node->local_weak_refs || node->tmp_refs ||
1509 !hlist_empty(&node->refs))
Todd Kjosed297212017-06-29 12:02:01 -07001510 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001511 }
Todd Kjosed297212017-06-29 12:02:01 -07001512
1513 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001514 if (list_empty(&node->work.entry)) {
Todd Kjos72196392017-06-29 12:02:02 -07001515 binder_enqueue_work_ilocked(&node->work, &proc->todo);
Martijn Coenen408c68b2017-08-31 10:04:19 +02001516 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001517 }
1518 } else {
1519 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
Todd Kjosadc18842017-06-29 12:01:59 -07001520 !node->local_weak_refs && !node->tmp_refs) {
Todd Kjosed297212017-06-29 12:02:01 -07001521 if (proc) {
Todd Kjos72196392017-06-29 12:02:02 -07001522 binder_dequeue_work_ilocked(&node->work);
1523 rb_erase(&node->rb_node, &proc->nodes);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001524 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301525 "refless node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001526 node->debug_id);
1527 } else {
Todd Kjos72196392017-06-29 12:02:02 -07001528 BUG_ON(!list_empty(&node->work.entry));
Todd Kjosc44b1232017-06-29 12:01:43 -07001529 spin_lock(&binder_dead_nodes_lock);
Todd Kjosed297212017-06-29 12:02:01 -07001530 /*
1531 * tmp_refs could have changed so
1532 * check it again
1533 */
1534 if (node->tmp_refs) {
1535 spin_unlock(&binder_dead_nodes_lock);
1536 return false;
1537 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001538 hlist_del(&node->dead_node);
Todd Kjosc44b1232017-06-29 12:01:43 -07001539 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001540 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301541 "dead node %d deleted\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001542 node->debug_id);
1543 }
Todd Kjosed297212017-06-29 12:02:01 -07001544 return true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001545 }
1546 }
Todd Kjosed297212017-06-29 12:02:01 -07001547 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001548}
1549
Todd Kjosed297212017-06-29 12:02:01 -07001550static void binder_dec_node(struct binder_node *node, int strong, int internal)
1551{
1552 bool free_node;
1553
Todd Kjos673068e2017-06-29 12:02:03 -07001554 binder_node_inner_lock(node);
1555 free_node = binder_dec_node_nilocked(node, strong, internal);
1556 binder_node_inner_unlock(node);
Todd Kjosed297212017-06-29 12:02:01 -07001557 if (free_node)
1558 binder_free_node(node);
1559}
1560
1561static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
Todd Kjosadc18842017-06-29 12:01:59 -07001562{
1563 /*
1564 * No call to binder_inc_node() is needed since we
1565 * don't need to inform userspace of any changes to
1566 * tmp_refs
1567 */
1568 node->tmp_refs++;
1569}
1570
1571/**
Todd Kjosed297212017-06-29 12:02:01 -07001572 * binder_inc_node_tmpref() - take a temporary reference on node
1573 * @node: node to reference
1574 *
1575 * Take reference on node to prevent the node from being freed
1576 * while referenced only by a local variable. The inner lock is
1577 * needed to serialize with the node work on the queue (which
1578 * isn't needed after the node is dead). If the node is dead
1579 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1580 * node->tmp_refs against dead-node-only cases where the node
1581 * lock cannot be acquired (e.g. traversing the dead node list to
1582 * print nodes)
1583 */
1584static void binder_inc_node_tmpref(struct binder_node *node)
1585{
Todd Kjos673068e2017-06-29 12:02:03 -07001586 binder_node_lock(node);
Todd Kjosed297212017-06-29 12:02:01 -07001587 if (node->proc)
1588 binder_inner_proc_lock(node->proc);
1589 else
1590 spin_lock(&binder_dead_nodes_lock);
1591 binder_inc_node_tmpref_ilocked(node);
1592 if (node->proc)
1593 binder_inner_proc_unlock(node->proc);
1594 else
1595 spin_unlock(&binder_dead_nodes_lock);
Todd Kjos673068e2017-06-29 12:02:03 -07001596 binder_node_unlock(node);
Todd Kjosed297212017-06-29 12:02:01 -07001597}
1598
1599/**
Todd Kjosadc18842017-06-29 12:01:59 -07001600 * binder_dec_node_tmpref() - remove a temporary reference on node
1601 * @node: node for which the temporary reference is released
1602 *
1603 * Release temporary reference on node taken via binder_inc_node_tmpref()
1604 */
1605static void binder_dec_node_tmpref(struct binder_node *node)
1606{
Todd Kjosed297212017-06-29 12:02:01 -07001607 bool free_node;
1608
Todd Kjos673068e2017-06-29 12:02:03 -07001609 binder_node_inner_lock(node);
1610 if (!node->proc)
Todd Kjosed297212017-06-29 12:02:01 -07001611 spin_lock(&binder_dead_nodes_lock);
Todd Kjosadc18842017-06-29 12:01:59 -07001612 node->tmp_refs--;
1613 BUG_ON(node->tmp_refs < 0);
Todd Kjosed297212017-06-29 12:02:01 -07001614 if (!node->proc)
1615 spin_unlock(&binder_dead_nodes_lock);
Todd Kjosadc18842017-06-29 12:01:59 -07001616 /*
1617 * Call binder_dec_node() to check if all refcounts are 0
1618 * and cleanup is needed. Calling with strong=0 and internal=1
1619 * causes no actual reference to be released in binder_dec_node().
1620 * If that changes, a change is needed here too.
1621 */
Todd Kjos673068e2017-06-29 12:02:03 -07001622 free_node = binder_dec_node_nilocked(node, 0, 1);
1623 binder_node_inner_unlock(node);
Todd Kjosed297212017-06-29 12:02:01 -07001624 if (free_node)
1625 binder_free_node(node);
Todd Kjosadc18842017-06-29 12:01:59 -07001626}
1627
1628static void binder_put_node(struct binder_node *node)
1629{
1630 binder_dec_node_tmpref(node);
1631}
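
/*
 * Illustrative use of the tmp_refs API above (editorial sketch, not
 * driver code): lookups such as binder_get_node() return the node with
 * an implicit temporary reference, which must be balanced with
 * binder_put_node() once the local pointer is no longer needed:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		// ... node cannot be freed while tmp_refs is held ...
 *		binder_put_node(node);
 *	}
 */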
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001632
Todd Kjos2c1838d2017-06-29 12:02:08 -07001633static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1634 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001635{
1636 struct rb_node *n = proc->refs_by_desc.rb_node;
1637 struct binder_ref *ref;
1638
1639 while (n) {
1640 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1641
Todd Kjos372e3142017-06-29 12:01:58 -07001642 if (desc < ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001643 n = n->rb_left;
Todd Kjos372e3142017-06-29 12:01:58 -07001644 } else if (desc > ref->data.desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001645 n = n->rb_right;
Todd Kjos372e3142017-06-29 12:01:58 -07001646 } else if (need_strong_ref && !ref->data.strong) {
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001647 binder_user_error("tried to use weak ref as strong ref\n");
1648 return NULL;
1649 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001650 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001651 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001652 }
1653 return NULL;
1654}
1655
Todd Kjos372e3142017-06-29 12:01:58 -07001656/**
Todd Kjos2c1838d2017-06-29 12:02:08 -07001657 * binder_get_ref_for_node_olocked() - get the ref associated with given node
Todd Kjos372e3142017-06-29 12:01:58 -07001658 * @proc: binder_proc that owns the ref
1659 * @node: binder_node of target
1660 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1661 *
1662 * Look up the ref for the given node and return it if it exists
1663 *
1664 * If it doesn't exist and the caller provides a newly allocated
1665 * ref, initialize the fields of the newly allocated ref and insert
1666 * into the given proc rb_trees and node refs list.
1667 *
1668 * Return: the ref for node. It is possible that another thread
1669 * allocated/initialized the ref first in which case the
1670 * returned ref would be different from the passed-in
1671 * new_ref. new_ref must be kfree'd by the caller in
1672 * this case.
1673 */
Todd Kjos2c1838d2017-06-29 12:02:08 -07001674static struct binder_ref *binder_get_ref_for_node_olocked(
1675 struct binder_proc *proc,
1676 struct binder_node *node,
1677 struct binder_ref *new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001678{
Todd Kjos372e3142017-06-29 12:01:58 -07001679 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001680 struct rb_node **p = &proc->refs_by_node.rb_node;
1681 struct rb_node *parent = NULL;
Todd Kjos372e3142017-06-29 12:01:58 -07001682 struct binder_ref *ref;
1683 struct rb_node *n;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001684
1685 while (*p) {
1686 parent = *p;
1687 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1688
1689 if (node < ref->node)
1690 p = &(*p)->rb_left;
1691 else if (node > ref->node)
1692 p = &(*p)->rb_right;
1693 else
1694 return ref;
1695 }
Todd Kjos372e3142017-06-29 12:01:58 -07001696 if (!new_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001697 return NULL;
Todd Kjos372e3142017-06-29 12:01:58 -07001698
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001699 binder_stats_created(BINDER_STAT_REF);
Todd Kjos372e3142017-06-29 12:01:58 -07001700 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001701 new_ref->proc = proc;
1702 new_ref->node = node;
1703 rb_link_node(&new_ref->rb_node_node, parent, p);
1704 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1705
Todd Kjos372e3142017-06-29 12:01:58 -07001706 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001707 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1708 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Todd Kjos372e3142017-06-29 12:01:58 -07001709 if (ref->data.desc > new_ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001710 break;
Todd Kjos372e3142017-06-29 12:01:58 -07001711 new_ref->data.desc = ref->data.desc + 1;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001712 }
1713
1714 p = &proc->refs_by_desc.rb_node;
1715 while (*p) {
1716 parent = *p;
1717 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1718
Todd Kjos372e3142017-06-29 12:01:58 -07001719 if (new_ref->data.desc < ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001720 p = &(*p)->rb_left;
Todd Kjos372e3142017-06-29 12:01:58 -07001721 else if (new_ref->data.desc > ref->data.desc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001722 p = &(*p)->rb_right;
1723 else
1724 BUG();
1725 }
1726 rb_link_node(&new_ref->rb_node_desc, parent, p);
1727 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
Todd Kjos673068e2017-06-29 12:02:03 -07001728
1729 binder_node_lock(node);
Todd Kjose4cffcf2017-06-29 12:01:50 -07001730 hlist_add_head(&new_ref->node_entry, &node->refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001731
Todd Kjose4cffcf2017-06-29 12:01:50 -07001732 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1733 "%d new ref %d desc %d for node %d\n",
Todd Kjos372e3142017-06-29 12:01:58 -07001734 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
Todd Kjose4cffcf2017-06-29 12:01:50 -07001735 node->debug_id);
Todd Kjos673068e2017-06-29 12:02:03 -07001736 binder_node_unlock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001737 return new_ref;
1738}
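
/*
 * Worked example of the descriptor assignment above (editorial note,
 * not driver code). Descriptors are handed out as the smallest unused
 * value, starting at 1 (0 is reserved for refs to the context manager
 * node). If the proc already holds descriptors {0, 1, 2, 5}:
 *
 *	new_ref->data.desc = 1;
 *	// walking refs_by_desc: 0 -> 1, 1 -> 2, 2 -> 3, then 5 > 3
 *	// stops the walk, so the new ref gets descriptor 3
 */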
1739
Todd Kjos2c1838d2017-06-29 12:02:08 -07001740static void binder_cleanup_ref_olocked(struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001741{
Todd Kjosed297212017-06-29 12:02:01 -07001742 bool delete_node = false;
Todd Kjosed297212017-06-29 12:02:01 -07001743
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001744 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301745 "%d delete ref %d desc %d for node %d\n",
Todd Kjos372e3142017-06-29 12:01:58 -07001746 ref->proc->pid, ref->data.debug_id, ref->data.desc,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301747 ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001748
1749 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1750 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
Todd Kjos372e3142017-06-29 12:01:58 -07001751
Todd Kjos673068e2017-06-29 12:02:03 -07001752 binder_node_inner_lock(ref->node);
Todd Kjos372e3142017-06-29 12:01:58 -07001753 if (ref->data.strong)
Todd Kjos673068e2017-06-29 12:02:03 -07001754 binder_dec_node_nilocked(ref->node, 1, 1);
Todd Kjos372e3142017-06-29 12:01:58 -07001755
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001756 hlist_del(&ref->node_entry);
Todd Kjos673068e2017-06-29 12:02:03 -07001757 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1758 binder_node_inner_unlock(ref->node);
Todd Kjosed297212017-06-29 12:02:01 -07001759 /*
1760 * Clear ref->node unless we want the caller to free the node
1761 */
1762 if (!delete_node) {
1763 /*
1764 * The caller uses ref->node to determine
1765 * whether the node needs to be freed. Clear
1766 * it since the node is still alive.
1767 */
1768 ref->node = NULL;
1769 }
Todd Kjos372e3142017-06-29 12:01:58 -07001770
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001771 if (ref->death) {
1772 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301773 "%d delete ref %d desc %d has death notification\n",
Todd Kjos372e3142017-06-29 12:01:58 -07001774 ref->proc->pid, ref->data.debug_id,
1775 ref->data.desc);
Todd Kjos72196392017-06-29 12:02:02 -07001776 binder_dequeue_work(ref->proc, &ref->death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001777 binder_stats_deleted(BINDER_STAT_DEATH);
1778 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001779 binder_stats_deleted(BINDER_STAT_REF);
1780}
1781
Todd Kjos372e3142017-06-29 12:01:58 -07001782/**
Todd Kjos2c1838d2017-06-29 12:02:08 -07001783 * binder_inc_ref_olocked() - increment the ref for given handle
Todd Kjos372e3142017-06-29 12:01:58 -07001784 * @ref: ref to be incremented
1785 * @strong: if true, strong increment, else weak
1786 * @target_list: list to queue node work on
1787 *
Todd Kjos2c1838d2017-06-29 12:02:08 -07001788 * Increment the ref. @ref->proc->outer_lock must be held on entry
Todd Kjos372e3142017-06-29 12:01:58 -07001789 *
1790 * Return: 0, if successful, else errno
1791 */
Todd Kjos2c1838d2017-06-29 12:02:08 -07001792static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1793 struct list_head *target_list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001794{
1795 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001796
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001797 if (strong) {
Todd Kjos372e3142017-06-29 12:01:58 -07001798 if (ref->data.strong == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001799 ret = binder_inc_node(ref->node, 1, 1, target_list);
1800 if (ret)
1801 return ret;
1802 }
Todd Kjos372e3142017-06-29 12:01:58 -07001803 ref->data.strong++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001804 } else {
Todd Kjos372e3142017-06-29 12:01:58 -07001805 if (ref->data.weak == 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001806 ret = binder_inc_node(ref->node, 0, 1, target_list);
1807 if (ret)
1808 return ret;
1809 }
Todd Kjos372e3142017-06-29 12:01:58 -07001810 ref->data.weak++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001811 }
1812 return 0;
1813}
1814
Todd Kjos372e3142017-06-29 12:01:58 -07001815/**
1816 * binder_dec_ref_olocked() - decrement the ref for given handle
1817 * @ref: ref to be decremented
1818 * @strong: if true, strong decrement, else weak
1819 *
1820 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1821 *
Todd Kjos372e3142017-06-29 12:01:58 -07001822 * Return: true if ref is cleaned up and ready to be freed
1823 */
Todd Kjos2c1838d2017-06-29 12:02:08 -07001824static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001825{
1826 if (strong) {
Todd Kjos372e3142017-06-29 12:01:58 -07001827 if (ref->data.strong == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301828 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
Todd Kjos372e3142017-06-29 12:01:58 -07001829 ref->proc->pid, ref->data.debug_id,
1830 ref->data.desc, ref->data.strong,
1831 ref->data.weak);
1832 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001833 }
Todd Kjos372e3142017-06-29 12:01:58 -07001834 ref->data.strong--;
Todd Kjosed297212017-06-29 12:02:01 -07001835 if (ref->data.strong == 0)
1836 binder_dec_node(ref->node, strong, 1);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001837 } else {
Todd Kjos372e3142017-06-29 12:01:58 -07001838 if (ref->data.weak == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301839 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
Todd Kjos372e3142017-06-29 12:01:58 -07001840 ref->proc->pid, ref->data.debug_id,
1841 ref->data.desc, ref->data.strong,
1842 ref->data.weak);
1843 return false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001844 }
Todd Kjos372e3142017-06-29 12:01:58 -07001845 ref->data.weak--;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001846 }
Todd Kjos372e3142017-06-29 12:01:58 -07001847 if (ref->data.strong == 0 && ref->data.weak == 0) {
Todd Kjos2c1838d2017-06-29 12:02:08 -07001848 binder_cleanup_ref_olocked(ref);
Todd Kjos372e3142017-06-29 12:01:58 -07001849 return true;
1850 }
1851 return false;
1852}
1853
1854/**
1855 * binder_get_node_from_ref() - get the node from the given proc/desc
1856 * @proc: proc containing the ref
1857 * @desc: the handle associated with the ref
1858 * @need_strong_ref: if true, only return node if ref is strong
1859 * @rdata: the id/refcount data for the ref
1860 *
1861 * Given a proc and ref handle, return the associated binder_node
1862 *
1863 * Return: a binder_node or NULL if not found or not strong when strong required
1864 */
1865static struct binder_node *binder_get_node_from_ref(
1866 struct binder_proc *proc,
1867 u32 desc, bool need_strong_ref,
1868 struct binder_ref_data *rdata)
1869{
1870 struct binder_node *node;
1871 struct binder_ref *ref;
1872
Todd Kjos2c1838d2017-06-29 12:02:08 -07001873 binder_proc_lock(proc);
1874 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
Todd Kjos372e3142017-06-29 12:01:58 -07001875 if (!ref)
1876 goto err_no_ref;
1877 node = ref->node;
Todd Kjosadc18842017-06-29 12:01:59 -07001878 /*
1879 * Take an implicit reference on the node to ensure
1880 * it stays alive until the call to binder_put_node()
1881 */
1882 binder_inc_node_tmpref(node);
Todd Kjos372e3142017-06-29 12:01:58 -07001883 if (rdata)
1884 *rdata = ref->data;
Todd Kjos2c1838d2017-06-29 12:02:08 -07001885 binder_proc_unlock(proc);
Todd Kjos372e3142017-06-29 12:01:58 -07001886
1887 return node;
1888
1889err_no_ref:
Todd Kjos2c1838d2017-06-29 12:02:08 -07001890 binder_proc_unlock(proc);
Todd Kjos372e3142017-06-29 12:01:58 -07001891 return NULL;
1892}
1893
1894/**
1895 * binder_free_ref() - free the binder_ref
1896 * @ref: ref to free
1897 *
Todd Kjosed297212017-06-29 12:02:01 -07001898 * Free the binder_ref. Free the binder_node indicated by ref->node
1899 * (if non-NULL) and the binder_ref_death indicated by ref->death.
Todd Kjos372e3142017-06-29 12:01:58 -07001900 */
1901static void binder_free_ref(struct binder_ref *ref)
1902{
Todd Kjosed297212017-06-29 12:02:01 -07001903 if (ref->node)
1904 binder_free_node(ref->node);
Todd Kjos372e3142017-06-29 12:01:58 -07001905 kfree(ref->death);
1906 kfree(ref);
1907}
1908
1909/**
1910 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1911 * @proc: proc containing the ref
1912 * @desc: the handle associated with the ref
1913 * @increment: true=inc reference, false=dec reference
1914 * @strong: true=strong reference, false=weak reference
1915 * @rdata: the id/refcount data for the ref
1916 *
1917 * Given a proc and ref handle, increment or decrement the ref
1918 * according to "increment" arg.
1919 *
1920 * Return: 0 if successful, else errno
1921 */
1922static int binder_update_ref_for_handle(struct binder_proc *proc,
1923 uint32_t desc, bool increment, bool strong,
1924 struct binder_ref_data *rdata)
1925{
1926 int ret = 0;
1927 struct binder_ref *ref;
1928 bool delete_ref = false;
1929
Todd Kjos2c1838d2017-06-29 12:02:08 -07001930 binder_proc_lock(proc);
1931 ref = binder_get_ref_olocked(proc, desc, strong);
Todd Kjos372e3142017-06-29 12:01:58 -07001932 if (!ref) {
1933 ret = -EINVAL;
1934 goto err_no_ref;
1935 }
1936 if (increment)
Todd Kjos2c1838d2017-06-29 12:02:08 -07001937 ret = binder_inc_ref_olocked(ref, strong, NULL);
Todd Kjos372e3142017-06-29 12:01:58 -07001938 else
Todd Kjos2c1838d2017-06-29 12:02:08 -07001939 delete_ref = binder_dec_ref_olocked(ref, strong);
Todd Kjos372e3142017-06-29 12:01:58 -07001940
1941 if (rdata)
1942 *rdata = ref->data;
Todd Kjos2c1838d2017-06-29 12:02:08 -07001943 binder_proc_unlock(proc);
Todd Kjos372e3142017-06-29 12:01:58 -07001944
1945 if (delete_ref)
1946 binder_free_ref(ref);
1947 return ret;
1948
1949err_no_ref:
Todd Kjos2c1838d2017-06-29 12:02:08 -07001950 binder_proc_unlock(proc);
Todd Kjos372e3142017-06-29 12:01:58 -07001951 return ret;
1952}
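
/*
 * Editorial sketch (not driver code): the BC_INCREFS/BC_ACQUIRE/
 * BC_RELEASE/BC_DECREFS handling in binder_thread_write() funnels
 * into the helper above, roughly:
 *
 *	struct binder_ref_data rdata;
 *	int ret = binder_update_ref_for_handle(proc, target,
 *					       increment, strong, &rdata);
 */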
1953
1954/**
1955 * binder_dec_ref_for_handle() - dec the ref for given handle
1956 * @proc: proc containing the ref
1957 * @desc: the handle associated with the ref
1958 * @strong: true=strong reference, false=weak reference
1959 * @rdata: the id/refcount data for the ref
1960 *
1961 * Just calls binder_update_ref_for_handle() to decrement the ref.
1962 *
1963 * Return: 0 if successful, else errno
1964 */
1965static int binder_dec_ref_for_handle(struct binder_proc *proc,
1966 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1967{
1968 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1969}
1970
1971
1972/**
1973 * binder_inc_ref_for_node() - increment the ref for given proc/node
1974 * @proc: proc containing the ref
1975 * @node: target node
1976 * @strong: true=strong reference, false=weak reference
1977 * @target_list: worklist to use if node is incremented
1978 * @rdata: the id/refcount data for the ref
1979 *
1980 * Given a proc and node, increment the ref. Create the ref if it
1981 * doesn't already exist
1982 *
1983 * Return: 0 if successful, else errno
1984 */
1985static int binder_inc_ref_for_node(struct binder_proc *proc,
1986 struct binder_node *node,
1987 bool strong,
1988 struct list_head *target_list,
1989 struct binder_ref_data *rdata)
1990{
1991 struct binder_ref *ref;
1992 struct binder_ref *new_ref = NULL;
1993 int ret = 0;
1994
Todd Kjos2c1838d2017-06-29 12:02:08 -07001995 binder_proc_lock(proc);
1996 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
Todd Kjos372e3142017-06-29 12:01:58 -07001997 if (!ref) {
Todd Kjos2c1838d2017-06-29 12:02:08 -07001998 binder_proc_unlock(proc);
Todd Kjos372e3142017-06-29 12:01:58 -07001999 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
2000 if (!new_ref)
2001 return -ENOMEM;
Todd Kjos2c1838d2017-06-29 12:02:08 -07002002 binder_proc_lock(proc);
2003 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
Todd Kjos372e3142017-06-29 12:01:58 -07002004 }
Todd Kjos2c1838d2017-06-29 12:02:08 -07002005 ret = binder_inc_ref_olocked(ref, strong, target_list);
Todd Kjos372e3142017-06-29 12:01:58 -07002006 *rdata = ref->data;
Todd Kjos2c1838d2017-06-29 12:02:08 -07002007 binder_proc_unlock(proc);
Todd Kjos372e3142017-06-29 12:01:58 -07002008 if (new_ref && ref != new_ref)
2009 /*
2010 * Another thread created the ref first so
2011 * free the one we allocated
2012 */
2013 kfree(new_ref);
2014 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002015}
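
/*
 * Illustrative caller (editorial sketch, not driver code):
 * binder_translate_binder() below uses this helper to give the target
 * process a ref to the sender's node:
 *
 *	ret = binder_inc_ref_for_node(target_proc, node,
 *			fp->hdr.type == BINDER_TYPE_BINDER,
 *			&thread->todo, &rdata);
 *	if (!ret)
 *		fp->handle = rdata.desc;	// valid in target_proc
 */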
2016
Martijn Coenen0b89d692017-06-29 12:02:06 -07002017static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
2018 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002019{
Todd Kjosb6d282c2017-06-29 12:01:54 -07002020 BUG_ON(!target_thread);
Martijn Coenen858b2712017-08-31 10:04:26 +02002021 assert_spin_locked(&target_thread->proc->inner_lock);
Todd Kjosb6d282c2017-06-29 12:01:54 -07002022 BUG_ON(target_thread->transaction_stack != t);
2023 BUG_ON(target_thread->transaction_stack->from != target_thread);
2024 target_thread->transaction_stack =
2025 target_thread->transaction_stack->from_parent;
2026 t->from = NULL;
2027}
2028
Todd Kjos7a4408c2017-06-29 12:01:57 -07002029/**
2030 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2031 * @thread: thread to decrement
2032 *
2033 * A thread needs to be kept alive while being used to create or
2034 * handle a transaction. binder_get_txn_from() is used to safely
2035 * extract t->from from a binder_transaction and keep the thread
2036 * indicated by t->from from being freed. When done with that
2037 * binder_thread, this function is called to decrement the
2038 * tmp_ref and free if appropriate (thread has been released
2039 * and no transaction being processed by the driver)
2040 */
2041static void binder_thread_dec_tmpref(struct binder_thread *thread)
2042{
2043 /*
2044 * tmp_ref is atomic so it can be incremented without the inner lock
2045 * (see binder_get_txn_from()); the lock below serializes the free.
Todd Kjos7a4408c2017-06-29 12:01:57 -07002046 */
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07002047 binder_inner_proc_lock(thread->proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07002048 atomic_dec(&thread->tmp_ref);
2049 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07002050 binder_inner_proc_unlock(thread->proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07002051 binder_free_thread(thread);
2052 return;
2053 }
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07002054 binder_inner_proc_unlock(thread->proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07002055}
2056
2057/**
2058 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2059 * @proc: proc to decrement
2060 *
2061 * A binder_proc needs to be kept alive while being used to create or
2062 * handle a transaction. proc->tmp_ref is incremented when
2063 * creating a new transaction or the binder_proc is currently in-use
2064 * by threads that are being released. When done with the binder_proc,
2065 * this function is called to decrement the counter and free the
2066 * proc if appropriate (proc has been released, all threads have
2067 * been released and not currenly in-use to process a transaction).
2068 */
2069static void binder_proc_dec_tmpref(struct binder_proc *proc)
2070{
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07002071 binder_inner_proc_lock(proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07002072 proc->tmp_ref--;
2073 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2074 !proc->tmp_ref) {
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07002075 binder_inner_proc_unlock(proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07002076 binder_free_proc(proc);
2077 return;
2078 }
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07002079 binder_inner_proc_unlock(proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07002080}
2081
2082/**
2083 * binder_get_txn_from() - safely extract the "from" thread in transaction
2084 * @t: binder transaction for t->from
2085 *
2086 * Atomically return the "from" thread and increment the tmp_ref
2087 * count for the thread to ensure it stays alive until
2088 * binder_thread_dec_tmpref() is called.
2089 *
2090 * Return: the value of t->from
2091 */
2092static struct binder_thread *binder_get_txn_from(
2093 struct binder_transaction *t)
2094{
2095 struct binder_thread *from;
2096
2097 spin_lock(&t->lock);
2098 from = t->from;
2099 if (from)
2100 atomic_inc(&from->tmp_ref);
2101 spin_unlock(&t->lock);
2102 return from;
2103}
2104
Martijn Coenen0b89d692017-06-29 12:02:06 -07002105/**
2106 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2107 * @t: binder transaction for t->from
2108 *
2109 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2110 * to guarantee that the thread cannot be released while operating on it.
2111 * The caller must call binder_inner_proc_unlock() to release the inner lock
2112 * as well as call binder_dec_thread_txn() to release the reference.
2113 *
2114 * Return: the value of t->from
2115 */
2116static struct binder_thread *binder_get_txn_from_and_acq_inner(
2117 struct binder_transaction *t)
2118{
2119 struct binder_thread *from;
2120
2121 from = binder_get_txn_from(t);
2122 if (!from)
2123 return NULL;
2124 binder_inner_proc_lock(from->proc);
2125 if (t->from) {
2126 BUG_ON(from != t->from);
2127 return from;
2128 }
2129 binder_inner_proc_unlock(from->proc);
2130 binder_thread_dec_tmpref(from);
2131 return NULL;
2132}
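
/*
 * Illustrative caller pattern (editorial sketch, not driver code);
 * binder_send_failed_reply() below follows this shape:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		// ... target_thread->proc->inner_lock is held here ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */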
2133
Todd Kjosb6d282c2017-06-29 12:01:54 -07002134static void binder_free_transaction(struct binder_transaction *t)
2135{
Todd Kjos22068d42019-06-12 13:29:27 -07002136 struct binder_proc *target_proc = t->to_proc;
2137
2138 if (target_proc) {
2139 binder_inner_proc_lock(target_proc);
2140 if (t->buffer)
2141 t->buffer->transaction = NULL;
2142 binder_inner_proc_unlock(target_proc);
2143 }
2144 /*
2145 * If the transaction has no target_proc, then
2146 * t->buffer->transaction has already been cleared.
2147 */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002148 kfree(t);
2149 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2150}
2151
2152static void binder_send_failed_reply(struct binder_transaction *t,
2153 uint32_t error_code)
2154{
2155 struct binder_thread *target_thread;
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002156 struct binder_transaction *next;
Seunghun Lee10f62862014-05-01 01:30:23 +09002157
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002158 BUG_ON(t->flags & TF_ONE_WAY);
2159 while (1) {
Martijn Coenen0b89d692017-06-29 12:02:06 -07002160 target_thread = binder_get_txn_from_and_acq_inner(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002161 if (target_thread) {
Todd Kjos26549d12017-06-29 12:01:55 -07002162 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2163 "send failed reply for transaction %d to %d:%d\n",
2164 t->debug_id,
2165 target_thread->proc->pid,
2166 target_thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002167
Martijn Coenen0b89d692017-06-29 12:02:06 -07002168 binder_pop_transaction_ilocked(target_thread, t);
Todd Kjos26549d12017-06-29 12:01:55 -07002169 if (target_thread->reply_error.cmd == BR_OK) {
2170 target_thread->reply_error.cmd = error_code;
Martijn Coenen148ade22017-11-15 09:21:35 +01002171 binder_enqueue_thread_work_ilocked(
2172 target_thread,
2173 &target_thread->reply_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002174 wake_up_interruptible(&target_thread->wait);
2175 } else {
Todd Kjose46a3b32018-02-07 12:38:47 -08002176 /*
2177 * Cannot get here for normal operation, but
2178 * we can if multiple synchronous transactions
2179 * are sent without blocking for responses.
2180 * Just ignore the 2nd error in this case.
2181 */
2182 pr_warn("Unexpected reply error: %u\n",
2183 target_thread->reply_error.cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002184 }
Martijn Coenen0b89d692017-06-29 12:02:06 -07002185 binder_inner_proc_unlock(target_thread->proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07002186 binder_thread_dec_tmpref(target_thread);
Todd Kjos26549d12017-06-29 12:01:55 -07002187 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002188 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002189 }
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002190 next = t->from_parent;
2191
2192 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2193 "send failed reply for transaction %d, target dead\n",
2194 t->debug_id);
2195
Todd Kjosb6d282c2017-06-29 12:01:54 -07002196 binder_free_transaction(t);
Lucas Tanured4ec15e2014-07-13 21:31:05 -03002197 if (next == NULL) {
2198 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2199 "reply failed, no target thread at root\n");
2200 return;
2201 }
2202 t = next;
2203 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2204 "reply failed, no target thread -- retry %d\n",
2205 t->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002206 }
2207}
2208
Martijn Coenenfeba3902017-02-03 14:40:45 -08002209/**
Martijn Coenenfb2c4452017-11-13 10:06:08 +01002210 * binder_cleanup_transaction() - cleans up undelivered transaction
2211 * @t: transaction that needs to be cleaned up
2212 * @reason: reason the transaction wasn't delivered
2213 * @error_code: error to return to caller (if synchronous call)
2214 */
2215static void binder_cleanup_transaction(struct binder_transaction *t,
2216 const char *reason,
2217 uint32_t error_code)
2218{
2219 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2220 binder_send_failed_reply(t, error_code);
2221 } else {
2222 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2223 "undelivered transaction %d, %s\n",
2224 t->debug_id, reason);
2225 binder_free_transaction(t);
2226 }
2227}
2228
2229/**
Todd Kjosa08646b2019-02-08 10:35:16 -08002230 * binder_get_object() - gets object and checks for valid metadata
2231 * @proc: binder_proc owning the buffer
Martijn Coenenfeba3902017-02-03 14:40:45 -08002232 * @buffer: binder_buffer that we're parsing.
Todd Kjosa08646b2019-02-08 10:35:16 -08002233 * @offset: offset in the @buffer at which to validate an object.
2234 * @object: struct binder_object to read into
Martijn Coenenfeba3902017-02-03 14:40:45 -08002235 *
2236 * Return: If there's a valid metadata object at @offset in @buffer, the
Todd Kjosa08646b2019-02-08 10:35:16 -08002237 * size of that object. Otherwise, it returns zero. The object
2238 * is read into the struct binder_object pointed to by @object.
Martijn Coenenfeba3902017-02-03 14:40:45 -08002239 */
Todd Kjosa08646b2019-02-08 10:35:16 -08002240static size_t binder_get_object(struct binder_proc *proc,
2241 struct binder_buffer *buffer,
2242 unsigned long offset,
2243 struct binder_object *object)
Martijn Coenenfeba3902017-02-03 14:40:45 -08002244{
Todd Kjosa08646b2019-02-08 10:35:16 -08002245 size_t read_size;
Martijn Coenenfeba3902017-02-03 14:40:45 -08002246 struct binder_object_header *hdr;
2247 size_t object_size = 0;
2248
Todd Kjosa08646b2019-02-08 10:35:16 -08002249 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
Todd Kjos684e11d2019-03-19 09:53:01 -07002250 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2251 !IS_ALIGNED(offset, sizeof(u32)))
Martijn Coenenfeba3902017-02-03 14:40:45 -08002252 return 0;
Todd Kjosa08646b2019-02-08 10:35:16 -08002253 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2254 offset, read_size);
Martijn Coenenfeba3902017-02-03 14:40:45 -08002255
Todd Kjosa08646b2019-02-08 10:35:16 -08002256 /* Ok, now see if we read a complete object. */
2257 hdr = &object->hdr;
Martijn Coenenfeba3902017-02-03 14:40:45 -08002258 switch (hdr->type) {
2259 case BINDER_TYPE_BINDER:
2260 case BINDER_TYPE_WEAK_BINDER:
2261 case BINDER_TYPE_HANDLE:
2262 case BINDER_TYPE_WEAK_HANDLE:
2263 object_size = sizeof(struct flat_binder_object);
2264 break;
2265 case BINDER_TYPE_FD:
2266 object_size = sizeof(struct binder_fd_object);
2267 break;
Martijn Coenen79802402017-02-03 14:40:51 -08002268 case BINDER_TYPE_PTR:
2269 object_size = sizeof(struct binder_buffer_object);
2270 break;
Martijn Coenendef95c72017-02-03 14:40:52 -08002271 case BINDER_TYPE_FDA:
2272 object_size = sizeof(struct binder_fd_array_object);
2273 break;
Martijn Coenenfeba3902017-02-03 14:40:45 -08002274 default:
2275 return 0;
2276 }
2277 if (offset <= buffer->data_size - object_size &&
2278 buffer->data_size >= object_size)
2279 return object_size;
2280 else
2281 return 0;
2282}
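
/*
 * Editorial sketch of how binder_get_object() is consumed (not driver
 * code); binder_transaction_buffer_release() below follows this shape:
 *
 *	struct binder_object object;
 *	size_t object_size = binder_get_object(proc, buffer,
 *					       object_offset, &object);
 *
 *	if (object_size == 0)
 *		return;		// truncated, misaligned, or unknown type
 *	hdr = &object.hdr;
 *	switch (hdr->type) {
 *	...
 *	}
 */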
2283
Martijn Coenen79802402017-02-03 14:40:51 -08002284/**
2285 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
Todd Kjosbaac2252019-02-08 10:35:17 -08002286 * @proc: binder_proc owning the buffer
Martijn Coenen79802402017-02-03 14:40:51 -08002287 * @b: binder_buffer containing the object
Todd Kjosbaac2252019-02-08 10:35:17 -08002288 * @object: struct binder_object to read into
Martijn Coenen79802402017-02-03 14:40:51 -08002289 * @index: index in offset array at which the binder_buffer_object is
2290 * located
Todd Kjosbaac2252019-02-08 10:35:17 -08002291 * @start_offset: points to the start of the offset array
2292 * @object_offsetp: offset of @object read from @b
Martijn Coenen79802402017-02-03 14:40:51 -08002293 * @num_valid: the number of valid offsets in the offset array
2294 *
2295 * Return: If @index is within the valid range of the offset array
2296 * described by @start_offset and @num_valid, and if there's a valid
2297 * binder_buffer_object at the offset found in index @index
2298 * of the offset array, that object is returned. Otherwise,
2299 * %NULL is returned.
2300 * Note that the offset found in index @index itself is not
2301 * verified; this function assumes that @num_valid elements
2302 * from @start_offset were previously verified to have valid offsets.
Todd Kjosbaac2252019-02-08 10:35:17 -08002303 * If @object_offsetp is non-NULL, then the offset within
2304 * @b is written to it.
Martijn Coenen79802402017-02-03 14:40:51 -08002305 */
Todd Kjosbaac2252019-02-08 10:35:17 -08002306static struct binder_buffer_object *binder_validate_ptr(
2307 struct binder_proc *proc,
2308 struct binder_buffer *b,
2309 struct binder_object *object,
2310 binder_size_t index,
2311 binder_size_t start_offset,
2312 binder_size_t *object_offsetp,
2313 binder_size_t num_valid)
Martijn Coenen79802402017-02-03 14:40:51 -08002314{
Todd Kjosbaac2252019-02-08 10:35:17 -08002315 size_t object_size;
2316 binder_size_t object_offset;
2317 unsigned long buffer_offset;
Martijn Coenen79802402017-02-03 14:40:51 -08002318
2319 if (index >= num_valid)
2320 return NULL;
2321
Todd Kjosbaac2252019-02-08 10:35:17 -08002322 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2323 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2324 b, buffer_offset, sizeof(object_offset));
2325 object_size = binder_get_object(proc, b, object_offset, object);
2326 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
Martijn Coenen79802402017-02-03 14:40:51 -08002327 return NULL;
Todd Kjosbaac2252019-02-08 10:35:17 -08002328 if (object_offsetp)
2329 *object_offsetp = object_offset;
Martijn Coenen79802402017-02-03 14:40:51 -08002330
Todd Kjosbaac2252019-02-08 10:35:17 -08002331 return &object->bbo;
Martijn Coenen79802402017-02-03 14:40:51 -08002332}
2333
2334/**
2335 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
Todd Kjosbaac2252019-02-08 10:35:17 -08002336 * @proc: binder_proc owning the buffer
Martijn Coenen79802402017-02-03 14:40:51 -08002337 * @b: transaction buffer
Todd Kjosbaac2252019-02-08 10:35:17 -08002338 * @objects_start_offset: offset to start of objects buffer
2339 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2340 * @fixup_offset: start offset in @buffer to fix up
2341 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2342 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
Martijn Coenen79802402017-02-03 14:40:51 -08002343 *
2344 * Return: %true if a fixup in buffer @buffer at offset @offset is
2345 * allowed.
2346 *
2347 * For safety reasons, we only allow fixups inside a buffer to happen
2348 * at increasing offsets; additionally, we only allow fixup on the last
2349 * buffer object that was verified, or one of its parents.
2350 *
2351 * Example of what is allowed:
2352 *
2353 * A
2354 * B (parent = A, offset = 0)
2355 * C (parent = A, offset = 16)
2356 * D (parent = C, offset = 0)
2357 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2358 *
2359 * Examples of what is not allowed:
2360 *
2361 * Decreasing offsets within the same parent:
2362 * A
2363 * C (parent = A, offset = 16)
2364 * B (parent = A, offset = 0) // decreasing offset within A
2365 *
2366 * Referring to a parent that wasn't the last object or any of its parents:
2367 * A
2368 * B (parent = A, offset = 0)
2369 * C (parent = A, offset = 0)
2370 * C (parent = A, offset = 16)
2371 * D (parent = B, offset = 0) // B is not A or any of A's parents
2372 */
Todd Kjosbaac2252019-02-08 10:35:17 -08002373static bool binder_validate_fixup(struct binder_proc *proc,
2374 struct binder_buffer *b,
2375 binder_size_t objects_start_offset,
2376 binder_size_t buffer_obj_offset,
Martijn Coenen79802402017-02-03 14:40:51 -08002377 binder_size_t fixup_offset,
Todd Kjosbaac2252019-02-08 10:35:17 -08002378 binder_size_t last_obj_offset,
Martijn Coenen79802402017-02-03 14:40:51 -08002379 binder_size_t last_min_offset)
2380{
Todd Kjosbaac2252019-02-08 10:35:17 -08002381 if (!last_obj_offset) {
Martijn Coenen79802402017-02-03 14:40:51 -08002382 /* Nothing to fix up in */
2383 return false;
2384 }
2385
Todd Kjosbaac2252019-02-08 10:35:17 -08002386 while (last_obj_offset != buffer_obj_offset) {
2387 unsigned long buffer_offset;
2388 struct binder_object last_object;
2389 struct binder_buffer_object *last_bbo;
2390 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2391 &last_object);
2392 if (object_size != sizeof(*last_bbo))
2393 return false;
2394
2395 last_bbo = &last_object.bbo;
Martijn Coenen79802402017-02-03 14:40:51 -08002396 /*
2397 * Safe to retrieve the parent of last_obj, since it
2398 * was already previously verified by the driver.
2399 */
Todd Kjosbaac2252019-02-08 10:35:17 -08002400 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
Martijn Coenen79802402017-02-03 14:40:51 -08002401 return false;
Todd Kjosbaac2252019-02-08 10:35:17 -08002402 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2403 buffer_offset = objects_start_offset +
2404 sizeof(binder_size_t) * last_bbo->parent;
2405 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2406 b, buffer_offset,
2407 sizeof(last_obj_offset));
Martijn Coenen79802402017-02-03 14:40:51 -08002408 }
2409 return (fixup_offset >= last_min_offset);
2410}
2411
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002412static void binder_transaction_buffer_release(struct binder_proc *proc,
2413 struct binder_buffer *buffer,
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002414 binder_size_t failed_at,
2415 bool is_failure)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002416{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002417 int debug_id = buffer->debug_id;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002418 binder_size_t off_start_offset, buffer_offset, off_end_offset;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002419
2420 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002421 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002422 proc->pid, buffer->debug_id,
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002423 buffer->data_size, buffer->offsets_size,
2424 (unsigned long long)failed_at);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002425
2426 if (buffer->target_node)
2427 binder_dec_node(buffer->target_node, 1, 0);
2428
Todd Kjosbaac2252019-02-08 10:35:17 -08002429 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002430 off_end_offset = is_failure ? failed_at :
2431 off_start_offset + buffer->offsets_size;
2432 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2433 buffer_offset += sizeof(binder_size_t)) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08002434 struct binder_object_header *hdr;
Todd Kjos06c24272019-02-08 10:35:15 -08002435 size_t object_size;
Todd Kjosa08646b2019-02-08 10:35:16 -08002436 struct binder_object object;
Todd Kjos06c24272019-02-08 10:35:15 -08002437 binder_size_t object_offset;
Seunghun Lee10f62862014-05-01 01:30:23 +09002438
Todd Kjos06c24272019-02-08 10:35:15 -08002439 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2440 buffer, buffer_offset,
2441 sizeof(object_offset));
Todd Kjosa08646b2019-02-08 10:35:16 -08002442 object_size = binder_get_object(proc, buffer,
2443 object_offset, &object);
Martijn Coenenfeba3902017-02-03 14:40:45 -08002444 if (object_size == 0) {
2445 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Todd Kjos06c24272019-02-08 10:35:15 -08002446 debug_id, (u64)object_offset, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002447 continue;
2448 }
Todd Kjosa08646b2019-02-08 10:35:16 -08002449 hdr = &object.hdr;
Martijn Coenenfeba3902017-02-03 14:40:45 -08002450 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002451 case BINDER_TYPE_BINDER:
2452 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08002453 struct flat_binder_object *fp;
2454 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002455
Martijn Coenenfeba3902017-02-03 14:40:45 -08002456 fp = to_flat_binder_object(hdr);
2457 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002458 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002459 pr_err("transaction release %d bad node %016llx\n",
2460 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002461 break;
2462 }
2463 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002464 " node %d u%016llx\n",
2465 node->debug_id, (u64)node->ptr);
Martijn Coenenfeba3902017-02-03 14:40:45 -08002466 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2467 0);
Todd Kjosadc18842017-06-29 12:01:59 -07002468 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002469 } break;
2470 case BINDER_TYPE_HANDLE:
2471 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08002472 struct flat_binder_object *fp;
Todd Kjos372e3142017-06-29 12:01:58 -07002473 struct binder_ref_data rdata;
2474 int ret;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002475
Martijn Coenenfeba3902017-02-03 14:40:45 -08002476 fp = to_flat_binder_object(hdr);
Todd Kjos372e3142017-06-29 12:01:58 -07002477 ret = binder_dec_ref_for_handle(proc, fp->handle,
2478 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2479
2480 if (ret) {
2481 pr_err("transaction release %d bad handle %d, ret = %d\n",
2482 debug_id, fp->handle, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002483 break;
2484 }
2485 binder_debug(BINDER_DEBUG_TRANSACTION,
Todd Kjos372e3142017-06-29 12:01:58 -07002486 " ref %d desc %d\n",
2487 rdata.debug_id, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002488 } break;
2489
Martijn Coenenfeba3902017-02-03 14:40:45 -08002490 case BINDER_TYPE_FD: {
2491 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2492
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002493 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenenfeba3902017-02-03 14:40:45 -08002494 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002495 if (failed_at)
Martijn Coenenfeba3902017-02-03 14:40:45 -08002496 task_close_fd(proc, fp->fd);
2497 } break;
Martijn Coenen79802402017-02-03 14:40:51 -08002498 case BINDER_TYPE_PTR:
2499 /*
2500 * Nothing to do here, this will get cleaned up when the
2501 * transaction buffer gets freed
2502 */
2503 break;
Martijn Coenendef95c72017-02-03 14:40:52 -08002504 case BINDER_TYPE_FDA: {
2505 struct binder_fd_array_object *fda;
2506 struct binder_buffer_object *parent;
Todd Kjosbaac2252019-02-08 10:35:17 -08002507 struct binder_object ptr_object;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002508 binder_size_t fda_offset;
Martijn Coenendef95c72017-02-03 14:40:52 -08002509 size_t fd_index;
2510 binder_size_t fd_buf_size;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002511 binder_size_t num_valid;
Martijn Coenendef95c72017-02-03 14:40:52 -08002512
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002513 num_valid = (buffer_offset - off_start_offset) /
2514 sizeof(binder_size_t);
Martijn Coenendef95c72017-02-03 14:40:52 -08002515 fda = to_binder_fd_array_object(hdr);
Todd Kjosbaac2252019-02-08 10:35:17 -08002516 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2517 fda->parent,
2518 off_start_offset,
2519 NULL,
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002520 num_valid);
Martijn Coenendef95c72017-02-03 14:40:52 -08002521 if (!parent) {
Arvind Yadavf7f84fd2017-09-25 12:52:11 +05302522 pr_err("transaction release %d bad parent offset\n",
Martijn Coenendef95c72017-02-03 14:40:52 -08002523 debug_id);
2524 continue;
2525 }
Martijn Coenendef95c72017-02-03 14:40:52 -08002526 fd_buf_size = sizeof(u32) * fda->num_fds;
2527 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2528 pr_err("transaction release %d invalid number of fds (%lld)\n",
2529 debug_id, (u64)fda->num_fds);
2530 continue;
2531 }
2532 if (fd_buf_size > parent->length ||
2533 fda->parent_offset > parent->length - fd_buf_size) {
2534 /* No space for all file descriptors here. */
2535 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2536 debug_id, (u64)fda->num_fds);
2537 continue;
2538 }
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002539 /*
2540 * the source data for binder_buffer_object is visible
2541 * to user-space and the @buffer element is the user
2542 * pointer to the buffer_object containing the fd_array.
2543 * Convert the address to an offset relative to
2544 * the base of the transaction buffer.
2545 */
2546 fda_offset =
2547 (parent->buffer - (uintptr_t)buffer->user_data) +
2548 fda->parent_offset;
Todd Kjos06c24272019-02-08 10:35:15 -08002549 for (fd_index = 0; fd_index < fda->num_fds;
2550 fd_index++) {
2551 u32 fd;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002552 binder_size_t offset = fda_offset +
2553 fd_index * sizeof(fd);
Todd Kjos06c24272019-02-08 10:35:15 -08002554
2555 binder_alloc_copy_from_buffer(&proc->alloc,
2556 &fd,
2557 buffer,
2558 offset,
2559 sizeof(fd));
2560 task_close_fd(proc, fd);
2561 }
Martijn Coenendef95c72017-02-03 14:40:52 -08002562 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002563 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01002564 pr_err("transaction release %d bad object type %x\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08002565 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002566 break;
2567 }
2568 }
2569}
2570
Martijn Coenena056af42017-02-03 14:40:49 -08002571static int binder_translate_binder(struct flat_binder_object *fp,
2572 struct binder_transaction *t,
2573 struct binder_thread *thread)
2574{
2575 struct binder_node *node;
Martijn Coenena056af42017-02-03 14:40:49 -08002576 struct binder_proc *proc = thread->proc;
2577 struct binder_proc *target_proc = t->to_proc;
Todd Kjos372e3142017-06-29 12:01:58 -07002578 struct binder_ref_data rdata;
Todd Kjosadc18842017-06-29 12:01:59 -07002579 int ret = 0;
Martijn Coenena056af42017-02-03 14:40:49 -08002580
2581 node = binder_get_node(proc, fp->binder);
2582 if (!node) {
Todd Kjos673068e2017-06-29 12:02:03 -07002583 node = binder_new_node(proc, fp);
Martijn Coenena056af42017-02-03 14:40:49 -08002584 if (!node)
2585 return -ENOMEM;
Martijn Coenena056af42017-02-03 14:40:49 -08002586 }
2587 if (fp->cookie != node->cookie) {
2588 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2589 proc->pid, thread->pid, (u64)fp->binder,
2590 node->debug_id, (u64)fp->cookie,
2591 (u64)node->cookie);
Todd Kjosadc18842017-06-29 12:01:59 -07002592 ret = -EINVAL;
2593 goto done;
Martijn Coenena056af42017-02-03 14:40:49 -08002594 }
Todd Kjosadc18842017-06-29 12:01:59 -07002595 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2596 ret = -EPERM;
2597 goto done;
2598 }
Martijn Coenena056af42017-02-03 14:40:49 -08002599
Todd Kjos372e3142017-06-29 12:01:58 -07002600 ret = binder_inc_ref_for_node(target_proc, node,
2601 fp->hdr.type == BINDER_TYPE_BINDER,
2602 &thread->todo, &rdata);
2603 if (ret)
Todd Kjosadc18842017-06-29 12:01:59 -07002604 goto done;
Martijn Coenena056af42017-02-03 14:40:49 -08002605
2606 if (fp->hdr.type == BINDER_TYPE_BINDER)
2607 fp->hdr.type = BINDER_TYPE_HANDLE;
2608 else
2609 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2610 fp->binder = 0;
Todd Kjos372e3142017-06-29 12:01:58 -07002611 fp->handle = rdata.desc;
Martijn Coenena056af42017-02-03 14:40:49 -08002612 fp->cookie = 0;
Martijn Coenena056af42017-02-03 14:40:49 -08002613
Todd Kjos372e3142017-06-29 12:01:58 -07002614 trace_binder_transaction_node_to_ref(t, node, &rdata);
Martijn Coenena056af42017-02-03 14:40:49 -08002615 binder_debug(BINDER_DEBUG_TRANSACTION,
2616 " node %d u%016llx -> ref %d desc %d\n",
2617 node->debug_id, (u64)node->ptr,
Todd Kjos372e3142017-06-29 12:01:58 -07002618 rdata.debug_id, rdata.desc);
Todd Kjosadc18842017-06-29 12:01:59 -07002619done:
2620 binder_put_node(node);
2621 return ret;
Martijn Coenena056af42017-02-03 14:40:49 -08002622}
2623
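/**
 * binder_translate_handle() - translate a handle for the target process
 * @fp:		flat_binder_object to translate (rewritten in place)
 * @t:		transaction the object belongs to
 * @thread:	sending thread
 *
 * Resolve the sender's handle to its binder_node. If the node lives in
 * the target process, rewrite the object back into a local (weak)
 * binder carrying the node's ptr/cookie; otherwise take a reference on
 * the node for the target and rewrite the handle to the target-side
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure.
 */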
2624static int binder_translate_handle(struct flat_binder_object *fp,
2625 struct binder_transaction *t,
2626 struct binder_thread *thread)
2627{
Martijn Coenena056af42017-02-03 14:40:49 -08002628 struct binder_proc *proc = thread->proc;
2629 struct binder_proc *target_proc = t->to_proc;
Todd Kjos372e3142017-06-29 12:01:58 -07002630 struct binder_node *node;
2631 struct binder_ref_data src_rdata;
Todd Kjosadc18842017-06-29 12:01:59 -07002632 int ret = 0;
Martijn Coenena056af42017-02-03 14:40:49 -08002633
Todd Kjos372e3142017-06-29 12:01:58 -07002634 node = binder_get_node_from_ref(proc, fp->handle,
2635 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2636 if (!node) {
Martijn Coenena056af42017-02-03 14:40:49 -08002637 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2638 proc->pid, thread->pid, fp->handle);
2639 return -EINVAL;
2640 }
Todd Kjosadc18842017-06-29 12:01:59 -07002641 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2642 ret = -EPERM;
2643 goto done;
2644 }
Martijn Coenena056af42017-02-03 14:40:49 -08002645
Todd Kjos673068e2017-06-29 12:02:03 -07002646 binder_node_lock(node);
Todd Kjos372e3142017-06-29 12:01:58 -07002647 if (node->proc == target_proc) {
Martijn Coenena056af42017-02-03 14:40:49 -08002648 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2649 fp->hdr.type = BINDER_TYPE_BINDER;
2650 else
2651 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
Todd Kjos372e3142017-06-29 12:01:58 -07002652 fp->binder = node->ptr;
2653 fp->cookie = node->cookie;
Todd Kjos673068e2017-06-29 12:02:03 -07002654 if (node->proc)
2655 binder_inner_proc_lock(node->proc);
2656 binder_inc_node_nilocked(node,
2657 fp->hdr.type == BINDER_TYPE_BINDER,
2658 0, NULL);
2659 if (node->proc)
2660 binder_inner_proc_unlock(node->proc);
Todd Kjos372e3142017-06-29 12:01:58 -07002661 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
Martijn Coenena056af42017-02-03 14:40:49 -08002662 binder_debug(BINDER_DEBUG_TRANSACTION,
2663 " ref %d desc %d -> node %d u%016llx\n",
Todd Kjos372e3142017-06-29 12:01:58 -07002664 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2665 (u64)node->ptr);
Todd Kjos673068e2017-06-29 12:02:03 -07002666 binder_node_unlock(node);
Martijn Coenena056af42017-02-03 14:40:49 -08002667 } else {
Todd Kjos372e3142017-06-29 12:01:58 -07002668 struct binder_ref_data dest_rdata;
Martijn Coenena056af42017-02-03 14:40:49 -08002669
Todd Kjos673068e2017-06-29 12:02:03 -07002670 binder_node_unlock(node);
Todd Kjos372e3142017-06-29 12:01:58 -07002671 ret = binder_inc_ref_for_node(target_proc, node,
2672 fp->hdr.type == BINDER_TYPE_HANDLE,
2673 NULL, &dest_rdata);
2674 if (ret)
Todd Kjosadc18842017-06-29 12:01:59 -07002675 goto done;
Martijn Coenena056af42017-02-03 14:40:49 -08002676
2677 fp->binder = 0;
Todd Kjos372e3142017-06-29 12:01:58 -07002678 fp->handle = dest_rdata.desc;
Martijn Coenena056af42017-02-03 14:40:49 -08002679 fp->cookie = 0;
Todd Kjos372e3142017-06-29 12:01:58 -07002680 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2681 &dest_rdata);
Martijn Coenena056af42017-02-03 14:40:49 -08002682 binder_debug(BINDER_DEBUG_TRANSACTION,
2683 " ref %d desc %d -> ref %d desc %d (node %d)\n",
Todd Kjos372e3142017-06-29 12:01:58 -07002684 src_rdata.debug_id, src_rdata.desc,
2685 dest_rdata.debug_id, dest_rdata.desc,
2686 node->debug_id);
Martijn Coenena056af42017-02-03 14:40:49 -08002687 }
Todd Kjosadc18842017-06-29 12:01:59 -07002688done:
2689 binder_put_node(node);
2690 return ret;
Martijn Coenena056af42017-02-03 14:40:49 -08002691}
2692
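/**
 * binder_translate_fd() - duplicate an fd into the target process
 * @fd:		file descriptor in the sender's fd table
 * @t:		transaction the fd travels in
 * @thread:	sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * Verify that the target accepts file descriptors (TF_ACCEPT_FDS for
 * replies, the target node's accept_fds otherwise), take a reference
 * on the file, run the security hook, then reserve an unused fd in the
 * target process and install the file there.
 *
 * Return: the target-side fd on success, negative errno on failure.
 */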
2693static int binder_translate_fd(int fd,
2694 struct binder_transaction *t,
2695 struct binder_thread *thread,
2696 struct binder_transaction *in_reply_to)
2697{
2698 struct binder_proc *proc = thread->proc;
2699 struct binder_proc *target_proc = t->to_proc;
2700 int target_fd;
2701 struct file *file;
2702 int ret;
2703 bool target_allows_fd;
2704
2705 if (in_reply_to)
2706 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2707 else
2708 target_allows_fd = t->buffer->target_node->accept_fds;
2709 if (!target_allows_fd) {
2710 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2711 proc->pid, thread->pid,
2712 in_reply_to ? "reply" : "transaction",
2713 fd);
2714 ret = -EPERM;
2715 goto err_fd_not_accepted;
2716 }
2717
2718 file = fget(fd);
2719 if (!file) {
2720 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2721 proc->pid, thread->pid, fd);
2722 ret = -EBADF;
2723 goto err_fget;
2724 }
2725 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2726 if (ret < 0) {
2727 ret = -EPERM;
2728 goto err_security;
2729 }
2730
2731 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2732 if (target_fd < 0) {
2733 ret = -ENOMEM;
2734 goto err_get_unused_fd;
2735 }
2736 task_fd_install(target_proc, target_fd, file);
2737 trace_binder_transaction_fd(t, fd, target_fd);
2738 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2739 fd, target_fd);
2740
2741 return target_fd;
2742
2743err_get_unused_fd:
2744err_security:
2745 fput(file);
2746err_fget:
2747err_fd_not_accepted:
2748 return ret;
2749}
2750
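/**
 * binder_translate_fd_array() - translate fds in a BINDER_TYPE_FDA object
 * @fda:	fd array object describing the fds
 * @parent:	buffer object the fd array is embedded in
 * @t:		transaction the array travels in
 * @thread:	sending thread
 * @in_reply_to: transaction being replied to, or NULL
 *
 * After validating that the array fits inside @parent, translate each
 * fd in place in the already-copied transaction buffer via
 * binder_translate_fd(). On failure, close the fds that were installed
 * in the target so far.
 *
 * Return: 0 on success, negative errno on failure.
 */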
Martijn Coenendef95c72017-02-03 14:40:52 -08002751static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2752 struct binder_buffer_object *parent,
2753 struct binder_transaction *t,
2754 struct binder_thread *thread,
2755 struct binder_transaction *in_reply_to)
2756{
2757 binder_size_t fdi, fd_buf_size, num_installed_fds;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002758 binder_size_t fda_offset;
Martijn Coenendef95c72017-02-03 14:40:52 -08002759 int target_fd;
Martijn Coenendef95c72017-02-03 14:40:52 -08002760 struct binder_proc *proc = thread->proc;
2761 struct binder_proc *target_proc = t->to_proc;
2762
2763 fd_buf_size = sizeof(u32) * fda->num_fds;
2764 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2765 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2766 proc->pid, thread->pid, (u64)fda->num_fds);
2767 return -EINVAL;
2768 }
2769 if (fd_buf_size > parent->length ||
2770 fda->parent_offset > parent->length - fd_buf_size) {
2771 /* No space for all file descriptors here. */
2772 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2773 proc->pid, thread->pid, (u64)fda->num_fds);
2774 return -EINVAL;
2775 }
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002776 /*
2777 * the source data for binder_buffer_object is visible
2778 * to user-space and the @buffer element is the user
2779 * pointer to the buffer_object containing the fd_array.
2780 * Convert the address to an offset relative to
2781 * the base of the transaction buffer.
2782 */
2783 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2784 fda->parent_offset;
2785 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
Martijn Coenendef95c72017-02-03 14:40:52 -08002786 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2787 proc->pid, thread->pid);
2788 return -EINVAL;
2789 }
2790 for (fdi = 0; fdi < fda->num_fds; fdi++) {
Todd Kjos06c24272019-02-08 10:35:15 -08002791 u32 fd;
Todd Kjos1637f8d2019-03-27 16:12:31 -07002792
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002793 binder_size_t offset = fda_offset + fdi * sizeof(fd);
Todd Kjos06c24272019-02-08 10:35:15 -08002794
2795 binder_alloc_copy_from_buffer(&target_proc->alloc,
2796 &fd, t->buffer,
2797 offset, sizeof(fd));
2798 target_fd = binder_translate_fd(fd, t, thread, in_reply_to);
Martijn Coenendef95c72017-02-03 14:40:52 -08002799 if (target_fd < 0)
2800 goto err_translate_fd_failed;
Todd Kjos06c24272019-02-08 10:35:15 -08002801 binder_alloc_copy_to_buffer(&target_proc->alloc,
2802 t->buffer, offset,
2803 &target_fd, sizeof(fd));
Martijn Coenendef95c72017-02-03 14:40:52 -08002804 }
2805 return 0;
2806
2807err_translate_fd_failed:
2808 /*
2809 * Failed to allocate fd or security error, free fds
2810 * installed so far.
2811 */
2812 num_installed_fds = fdi;
Todd Kjos06c24272019-02-08 10:35:15 -08002813 for (fdi = 0; fdi < num_installed_fds; fdi++) {
2814 u32 fd;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002815 binder_size_t offset = fda_offset + fdi * sizeof(fd);
Todd Kjos06c24272019-02-08 10:35:15 -08002816 binder_alloc_copy_from_buffer(&target_proc->alloc,
2817 &fd, t->buffer,
2818 offset, sizeof(fd));
2819 task_close_fd(target_proc, fd);
2820 }
Martijn Coenendef95c72017-02-03 14:40:52 -08002821 return target_fd;
2822}
2823
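/**
 * binder_fixup_parent() - fix up a parent buffer's pointer to this buffer
 * @t:			transaction being processed
 * @thread:		sending thread
 * @bp:			buffer object that may have a parent
 * @off_start_offset:	start of the offsets array in the buffer
 * @num_valid:		number of objects validated so far
 * @last_fixup_obj_off:	offset of the last object that had a fixup
 * @last_fixup_min_off:	minimum offset allowed for the next fixup
 *
 * If @bp has BINDER_BUFFER_FLAG_HAS_PARENT set, validate the parent
 * object and the fixup ordering, then write @bp's already-translated
 * buffer address into the parent buffer at @bp->parent_offset.
 *
 * Return: 0 on success, -EINVAL on a bad parent or out-of-order fixup.
 */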
Martijn Coenen79802402017-02-03 14:40:51 -08002824static int binder_fixup_parent(struct binder_transaction *t,
2825 struct binder_thread *thread,
2826 struct binder_buffer_object *bp,
Todd Kjosbaac2252019-02-08 10:35:17 -08002827 binder_size_t off_start_offset,
Martijn Coenen79802402017-02-03 14:40:51 -08002828 binder_size_t num_valid,
Todd Kjosbaac2252019-02-08 10:35:17 -08002829 binder_size_t last_fixup_obj_off,
Martijn Coenen79802402017-02-03 14:40:51 -08002830 binder_size_t last_fixup_min_off)
2831{
2832 struct binder_buffer_object *parent;
Martijn Coenen79802402017-02-03 14:40:51 -08002833 struct binder_buffer *b = t->buffer;
2834 struct binder_proc *proc = thread->proc;
2835 struct binder_proc *target_proc = t->to_proc;
Todd Kjosbaac2252019-02-08 10:35:17 -08002836 struct binder_object object;
2837 binder_size_t buffer_offset;
2838 binder_size_t parent_offset;
Martijn Coenen79802402017-02-03 14:40:51 -08002839
2840 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2841 return 0;
2842
Todd Kjosbaac2252019-02-08 10:35:17 -08002843 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2844 off_start_offset, &parent_offset,
2845 num_valid);
Martijn Coenen79802402017-02-03 14:40:51 -08002846 if (!parent) {
2847 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2848 proc->pid, thread->pid);
2849 return -EINVAL;
2850 }
2851
Todd Kjosbaac2252019-02-08 10:35:17 -08002852 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2853 parent_offset, bp->parent_offset,
2854 last_fixup_obj_off,
Martijn Coenen79802402017-02-03 14:40:51 -08002855 last_fixup_min_off)) {
2856 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2857 proc->pid, thread->pid);
2858 return -EINVAL;
2859 }
2860
2861 if (parent->length < sizeof(binder_uintptr_t) ||
2862 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2863 /* No space for a pointer here! */
2864 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2865 proc->pid, thread->pid);
2866 return -EINVAL;
2867 }
Todd Kjosbaac2252019-02-08 10:35:17 -08002868 buffer_offset = bp->parent_offset +
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002869 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
Todd Kjosbaac2252019-02-08 10:35:17 -08002870 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2871 &bp->buffer, sizeof(bp->buffer));
Martijn Coenen79802402017-02-03 14:40:51 -08002872
2873 return 0;
2874}
2875
Martijn Coenen408c68b2017-08-31 10:04:19 +02002876/**
2877 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2878 * @t: transaction to send
2879 * @proc: process to send the transaction to
2880 * @thread: thread in @proc to send the transaction to (may be NULL)
2881 *
2882 * This function queues a transaction to the specified process. It will try
2883 * to find a thread in the target process to handle the transaction and
2884 * wake it up. If no thread is found, the work is queued to proc->todo
2885 * and a waiter on the proc waitqueue is woken.
2886 *
2887 * If the @thread parameter is not NULL, the transaction is always queued
2888 * to the waitlist of that specific thread.
2889 *
2890 * Return: true if the transaction was successfully queued
2891 * false if the target process or thread is dead
2892 */
2893static bool binder_proc_transaction(struct binder_transaction *t,
2894 struct binder_proc *proc,
2895 struct binder_thread *thread)
2896{
Martijn Coenen408c68b2017-08-31 10:04:19 +02002897 struct binder_node *node = t->buffer->target_node;
Martijn Coenence388e02017-06-06 17:04:42 -07002898 struct binder_priority node_prio;
Martijn Coenen408c68b2017-08-31 10:04:19 +02002899 bool oneway = !!(t->flags & TF_ONE_WAY);
Martijn Coenen148ade22017-11-15 09:21:35 +01002900 bool pending_async = false;
Martijn Coenen408c68b2017-08-31 10:04:19 +02002901
2902 BUG_ON(!node);
2903 binder_node_lock(node);
Martijn Coenence388e02017-06-06 17:04:42 -07002904 node_prio.prio = node->min_priority;
2905 node_prio.sched_policy = node->sched_policy;
2906
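	/*
	 * Oneway transactions to a node are serialized: if one is already
	 * outstanding, this one is parked on node->async_todo below
	 * instead of being handed to a thread.
	 */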
Martijn Coenen408c68b2017-08-31 10:04:19 +02002907 if (oneway) {
2908 BUG_ON(thread);
2909 if (node->has_async_transaction) {
Martijn Coenen148ade22017-11-15 09:21:35 +01002910 pending_async = true;
Martijn Coenen408c68b2017-08-31 10:04:19 +02002911 } else {
Gustavo A. R. Silva197410a2018-01-23 12:04:27 -06002912 node->has_async_transaction = true;
Martijn Coenen408c68b2017-08-31 10:04:19 +02002913 }
2914 }
2915
2916 binder_inner_proc_lock(proc);
2917
2918 if (proc->is_dead || (thread && thread->is_dead)) {
2919 binder_inner_proc_unlock(proc);
2920 binder_node_unlock(node);
2921 return false;
2922 }
2923
Martijn Coenen148ade22017-11-15 09:21:35 +01002924 if (!thread && !pending_async)
Martijn Coenen408c68b2017-08-31 10:04:19 +02002925 thread = binder_select_thread_ilocked(proc);
2926
Martijn Coenence388e02017-06-06 17:04:42 -07002927 if (thread) {
2928 binder_transaction_priority(thread->task, t, node_prio,
2929 node->inherit_rt);
Martijn Coenen148ade22017-11-15 09:21:35 +01002930 binder_enqueue_thread_work_ilocked(thread, &t->work);
Martijn Coenence388e02017-06-06 17:04:42 -07002931 } else if (!pending_async) {
Martijn Coenen148ade22017-11-15 09:21:35 +01002932 binder_enqueue_work_ilocked(&t->work, &proc->todo);
Martijn Coenence388e02017-06-06 17:04:42 -07002933 } else {
Martijn Coenen148ade22017-11-15 09:21:35 +01002934 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
Martijn Coenence388e02017-06-06 17:04:42 -07002935 }
Martijn Coenen408c68b2017-08-31 10:04:19 +02002936
Martijn Coenen148ade22017-11-15 09:21:35 +01002937 if (!pending_async)
Martijn Coenen408c68b2017-08-31 10:04:19 +02002938 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2939
2940 binder_inner_proc_unlock(proc);
2941 binder_node_unlock(node);
2942
2943 return true;
2944}
2945
Todd Kjos512cf462017-09-29 15:39:49 -07002946/**
2947 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2948 * @node: struct binder_node for which to get refs
2949 * @procp: returns @node->proc if valid
2950 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2951 *
2952 * User-space normally keeps the node alive when creating a transaction
2953 * since it has a reference to the target. The local strong ref keeps it
2954 * alive if the sending process dies before the target process processes
2955 * the transaction. If the source process is malicious or has a reference
2956 * counting bug, relying on the local strong ref can fail.
2957 *
2958 * Since user-space can cause the local strong ref to go away, we also take
2959 * a tmpref on the node to ensure it survives while we are constructing
2960 * the transaction. We also need a tmpref on the proc while we are
2961 * constructing the transaction, so we take that here as well.
2962 *
2963 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2964 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2965 * target proc has died, @error is set to BR_DEAD_REPLY.
2966 */
2967static struct binder_node *binder_get_node_refs_for_txn(
2968 struct binder_node *node,
2969 struct binder_proc **procp,
2970 uint32_t *error)
2971{
2972 struct binder_node *target_node = NULL;
2973
2974 binder_node_inner_lock(node);
2975 if (node->proc) {
2976 target_node = node;
2977 binder_inc_node_nilocked(node, 1, 0, NULL);
2978 binder_inc_node_tmpref_ilocked(node);
2979 node->proc->tmp_ref++;
2980 *procp = node->proc;
2981 } else
2982 *error = BR_DEAD_REPLY;
2983 binder_node_inner_unlock(node);
2984
2985 return target_node;
2986}
2987
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002988static void binder_transaction(struct binder_proc *proc,
2989 struct binder_thread *thread,
Martijn Coenen4bfac802017-02-03 14:40:50 -08002990 struct binder_transaction_data *tr, int reply,
2991 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002992{
Martijn Coenena056af42017-02-03 14:40:49 -08002993 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002994 struct binder_transaction *t;
Sherry Yangb0cb2d82018-08-13 17:28:53 -07002995 struct binder_work *w;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002996 struct binder_work *tcomplete;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08002997 binder_size_t buffer_offset = 0;
2998 binder_size_t off_start_offset, off_end_offset;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08002999 binder_size_t off_min;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003000 binder_size_t sg_buf_offset, sg_buf_end_offset;
Todd Kjos7a4408c2017-06-29 12:01:57 -07003001 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003002 struct binder_thread *target_thread = NULL;
3003 struct binder_node *target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003004 struct binder_transaction *in_reply_to = NULL;
3005 struct binder_transaction_log_entry *e;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003006 uint32_t return_error = 0;
3007 uint32_t return_error_param = 0;
3008 uint32_t return_error_line = 0;
Todd Kjosbaac2252019-02-08 10:35:17 -08003009 binder_size_t last_fixup_obj_off = 0;
Martijn Coenen79802402017-02-03 14:40:51 -08003010 binder_size_t last_fixup_min_off = 0;
Martijn Coenen342e5c92017-02-03 14:40:46 -08003011 struct binder_context *context = proc->context;
Todd Kjosd99c7332017-06-29 12:01:53 -07003012 int t_debug_id = atomic_inc_return(&binder_last_id);
Todd Kjos00bac142019-01-14 09:10:21 -08003013 char *secctx = NULL;
3014 u32 secctx_sz = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003015
3016 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjosd99c7332017-06-29 12:01:53 -07003017 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003018 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3019 e->from_proc = proc->pid;
3020 e->from_thread = thread->pid;
3021 e->target_handle = tr->target.handle;
3022 e->data_size = tr->data_size;
3023 e->offsets_size = tr->offsets_size;
Martijn Coenen14db3182017-02-03 14:40:47 -08003024 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003025
3026 if (reply) {
Martijn Coenen0b89d692017-06-29 12:02:06 -07003027 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003028 in_reply_to = thread->transaction_stack;
3029 if (in_reply_to == NULL) {
Martijn Coenen0b89d692017-06-29 12:02:06 -07003030 binder_inner_proc_unlock(proc);
Anmol Sarma56b468f2012-10-30 22:35:43 +05303031 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003032 proc->pid, thread->pid);
3033 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003034 return_error_param = -EPROTO;
3035 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003036 goto err_empty_call_stack;
3037 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003038 if (in_reply_to->to_thread != thread) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07003039 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05303040 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003041 proc->pid, thread->pid, in_reply_to->debug_id,
3042 in_reply_to->to_proc ?
3043 in_reply_to->to_proc->pid : 0,
3044 in_reply_to->to_thread ?
3045 in_reply_to->to_thread->pid : 0);
Todd Kjos7a4408c2017-06-29 12:01:57 -07003046 spin_unlock(&in_reply_to->lock);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003047 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003048 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003049 return_error_param = -EPROTO;
3050 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003051 in_reply_to = NULL;
3052 goto err_bad_call_stack;
3053 }
3054 thread->transaction_stack = in_reply_to->to_parent;
Martijn Coenen0b89d692017-06-29 12:02:06 -07003055 binder_inner_proc_unlock(proc);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003056 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003057 if (target_thread == NULL) {
3058 return_error = BR_DEAD_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003059 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003060 goto err_dead_binder;
3061 }
3062 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303063 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003064 proc->pid, thread->pid,
3065 target_thread->transaction_stack ?
3066 target_thread->transaction_stack->debug_id : 0,
3067 in_reply_to->debug_id);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003068 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003069 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003070 return_error_param = -EPROTO;
3071 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003072 in_reply_to = NULL;
3073 target_thread = NULL;
3074 goto err_dead_binder;
3075 }
3076 target_proc = target_thread->proc;
Todd Kjos7a4408c2017-06-29 12:01:57 -07003077 target_proc->tmp_ref++;
Martijn Coenen0b89d692017-06-29 12:02:06 -07003078 binder_inner_proc_unlock(target_thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003079 } else {
3080 if (tr->target.handle) {
3081 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09003082
Todd Kjoseb349832017-06-29 12:01:56 -07003083 /*
3084 * There must already be a strong ref
3085 * on this node. If one is found, do a
3086 * strong increment on the node to
3087 * ensure it stays alive until the
3088 * transaction is done.
3089 */
Todd Kjos2c1838d2017-06-29 12:02:08 -07003090 binder_proc_lock(proc);
3091 ref = binder_get_ref_olocked(proc, tr->target.handle,
3092 true);
Todd Kjoseb349832017-06-29 12:01:56 -07003093 if (ref) {
Todd Kjos512cf462017-09-29 15:39:49 -07003094 target_node = binder_get_node_refs_for_txn(
3095 ref->node, &target_proc,
3096 &return_error);
3097 } else {
3098 binder_user_error("%d:%d got transaction to invalid handle\n",
3099 proc->pid, thread->pid);
3100 return_error = BR_FAILED_REPLY;
Todd Kjoseb349832017-06-29 12:01:56 -07003101 }
Todd Kjos2c1838d2017-06-29 12:02:08 -07003102 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003103 } else {
Todd Kjosc44b1232017-06-29 12:01:43 -07003104 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen342e5c92017-02-03 14:40:46 -08003105 target_node = context->binder_context_mgr_node;
Todd Kjos512cf462017-09-29 15:39:49 -07003106 if (target_node)
3107 target_node = binder_get_node_refs_for_txn(
3108 target_node, &target_proc,
3109 &return_error);
3110 else
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003111 return_error = BR_DEAD_REPLY;
Todd Kjosc44b1232017-06-29 12:01:43 -07003112 mutex_unlock(&context->context_mgr_node_lock);
Hridya Valsarajue907b132019-07-15 12:18:04 -07003113 if (target_node && target_proc->pid == proc->pid) {
Martijn Coenen7aa135f2018-03-28 11:14:50 +02003114 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3115 proc->pid, thread->pid);
3116 return_error = BR_FAILED_REPLY;
3117 return_error_param = -EINVAL;
3118 return_error_line = __LINE__;
3119 goto err_invalid_target_handle;
3120 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003121 }
Todd Kjos512cf462017-09-29 15:39:49 -07003122 if (!target_node) {
3123 /*
3124 * return_error is set above
3125 */
3126 return_error_param = -EINVAL;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003127 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003128 goto err_dead_binder;
3129 }
Todd Kjos512cf462017-09-29 15:39:49 -07003130 e->to_node = target_node->debug_id;
Stephen Smalley79af7302015-01-21 10:54:10 -05003131 if (security_binder_transaction(proc->tsk,
3132 target_proc->tsk) < 0) {
3133 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003134 return_error_param = -EPERM;
3135 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05003136 goto err_invalid_target_handle;
3137 }
Martijn Coenen0b89d692017-06-29 12:02:06 -07003138 binder_inner_proc_lock(proc);
Sherry Yangb0cb2d82018-08-13 17:28:53 -07003139
3140 w = list_first_entry_or_null(&thread->todo,
3141 struct binder_work, entry);
3142 if (!(tr->flags & TF_ONE_WAY) && w &&
3143 w->type == BINDER_WORK_TRANSACTION) {
3144 /*
3145 * Do not allow new outgoing transaction from a
3146 * thread that has a transaction at the head of
3147 * its todo list. Only need to check the head
3148 * because binder_select_thread_ilocked picks a
3149 * thread from proc->waiting_threads to enqueue
3150 * the transaction, and nothing is queued to the
3151 * todo list while the thread is on waiting_threads.
3152 */
3153 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3154 proc->pid, thread->pid);
3155 binder_inner_proc_unlock(proc);
3156 return_error = BR_FAILED_REPLY;
3157 return_error_param = -EPROTO;
3158 return_error_line = __LINE__;
3159 goto err_bad_todo_list;
3160 }
3161
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003162 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3163 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003164
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003165 tmp = thread->transaction_stack;
3166 if (tmp->to_thread != thread) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07003167 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05303168 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003169 proc->pid, thread->pid, tmp->debug_id,
3170 tmp->to_proc ? tmp->to_proc->pid : 0,
3171 tmp->to_thread ?
3172 tmp->to_thread->pid : 0);
Todd Kjos7a4408c2017-06-29 12:01:57 -07003173 spin_unlock(&tmp->lock);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003174 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003175 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003176 return_error_param = -EPROTO;
3177 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003178 goto err_bad_call_stack;
3179 }
3180 while (tmp) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07003181 struct binder_thread *from;
3182
3183 spin_lock(&tmp->lock);
3184 from = tmp->from;
3185 if (from && from->proc == target_proc) {
3186 atomic_inc(&from->tmp_ref);
3187 target_thread = from;
3188 spin_unlock(&tmp->lock);
3189 break;
3190 }
3191 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003192 tmp = tmp->from_parent;
3193 }
3194 }
Martijn Coenen0b89d692017-06-29 12:02:06 -07003195 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003196 }
Martijn Coenen408c68b2017-08-31 10:04:19 +02003197 if (target_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003198 e->to_thread = target_thread->pid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003199 e->to_proc = target_proc->pid;
3200
3201 /* TODO: reuse incoming transaction for reply */
3202 t = kzalloc(sizeof(*t), GFP_KERNEL);
3203 if (t == NULL) {
3204 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003205 return_error_param = -ENOMEM;
3206 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003207 goto err_alloc_t_failed;
3208 }
3209 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos7a4408c2017-06-29 12:01:57 -07003210 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003211
3212 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3213 if (tcomplete == NULL) {
3214 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003215 return_error_param = -ENOMEM;
3216 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003217 goto err_alloc_tcomplete_failed;
3218 }
3219 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3220
Todd Kjosd99c7332017-06-29 12:01:53 -07003221 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003222
3223 if (reply)
3224 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen4bfac802017-02-03 14:40:50 -08003225 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003226 proc->pid, thread->pid, t->debug_id,
3227 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003228 (u64)tr->data.ptr.buffer,
3229 (u64)tr->data.ptr.offsets,
Martijn Coenen4bfac802017-02-03 14:40:50 -08003230 (u64)tr->data_size, (u64)tr->offsets_size,
3231 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003232 else
3233 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen4bfac802017-02-03 14:40:50 -08003234 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003235 proc->pid, thread->pid, t->debug_id,
3236 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003237 (u64)tr->data.ptr.buffer,
3238 (u64)tr->data.ptr.offsets,
Martijn Coenen4bfac802017-02-03 14:40:50 -08003239 (u64)tr->data_size, (u64)tr->offsets_size,
3240 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003241
3242 if (!reply && !(tr->flags & TF_ONE_WAY))
3243 t->from = thread;
3244 else
3245 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03003246 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003247 t->to_proc = target_proc;
3248 t->to_thread = target_thread;
3249 t->code = tr->code;
3250 t->flags = tr->flags;
Martijn Coenence388e02017-06-06 17:04:42 -07003251 if (!(t->flags & TF_ONE_WAY) &&
3252 binder_supported_policy(current->policy)) {
3253 /* Inherit supported policies for synchronous transactions */
3254 t->priority.sched_policy = current->policy;
3255 t->priority.prio = current->normal_prio;
3256 } else {
3257 /* Otherwise, fall back to the default priority */
3258 t->priority = target_proc->default_priority;
3259 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003260
Todd Kjos00bac142019-01-14 09:10:21 -08003261 if (target_node && target_node->txn_security_ctx) {
3262 u32 secid;
Todd Kjos8ae84852019-04-24 12:31:18 -07003263 size_t added_size;
Todd Kjos00bac142019-01-14 09:10:21 -08003264
3265 security_task_getsecid(proc->tsk, &secid);
3266 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3267 if (ret) {
3268 return_error = BR_FAILED_REPLY;
3269 return_error_param = ret;
3270 return_error_line = __LINE__;
3271 goto err_get_secctx_failed;
3272 }
Todd Kjos8ae84852019-04-24 12:31:18 -07003273 added_size = ALIGN(secctx_sz, sizeof(u64));
3274 extra_buffers_size += added_size;
3275 if (extra_buffers_size < added_size) {
3276 /* integer overflow of extra_buffers_size */
3277 return_error = BR_FAILED_REPLY;
3278 return_error_param = -EINVAL;
3279 return_error_line = __LINE__;
3280 goto err_bad_extra_size;
3281 }
Todd Kjos00bac142019-01-14 09:10:21 -08003282 }
3283
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003284 trace_binder_transaction(reply, t, target_node);
3285
Todd Kjos19c98722017-06-29 12:01:40 -07003286 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen4bfac802017-02-03 14:40:50 -08003287 tr->offsets_size, extra_buffers_size,
3288 !reply && (t->flags & TF_ONE_WAY));
Todd Kjos57ada2f2017-06-29 12:01:46 -07003289 if (IS_ERR(t->buffer)) {
3290 /*
3291 * -ESRCH indicates VMA cleared. The target is dying.
3292 */
3293 return_error_param = PTR_ERR(t->buffer);
3294 return_error = return_error_param == -ESRCH ?
3295 BR_DEAD_REPLY : BR_FAILED_REPLY;
3296 return_error_line = __LINE__;
3297 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003298 goto err_binder_alloc_buf_failed;
3299 }
Todd Kjos00bac142019-01-14 09:10:21 -08003300 if (secctx) {
3301 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3302 ALIGN(tr->offsets_size, sizeof(void *)) +
3303 ALIGN(extra_buffers_size, sizeof(void *)) -
3304 ALIGN(secctx_sz, sizeof(u64));
Todd Kjos00bac142019-01-14 09:10:21 -08003305
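		/*
		 * The security context occupies the last
		 * ALIGN(secctx_sz, sizeof(u64)) bytes of the buffer;
		 * extra_buffers_size was grown by that amount above, so
		 * buf_offset points at the tail of the allocation.
		 */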
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003306 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
Todd Kjos06c24272019-02-08 10:35:15 -08003307 binder_alloc_copy_to_buffer(&target_proc->alloc,
3308 t->buffer, buf_offset,
3309 secctx, secctx_sz);
Todd Kjos00bac142019-01-14 09:10:21 -08003310 security_release_secctx(secctx, secctx_sz);
3311 secctx = NULL;
3312 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003313 t->buffer->debug_id = t->debug_id;
3314 t->buffer->transaction = t;
3315 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003316 trace_binder_transaction_alloc_buf(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003317
Todd Kjos5f245a92019-02-08 10:35:14 -08003318 if (binder_alloc_copy_user_to_buffer(
3319 &target_proc->alloc,
3320 t->buffer, 0,
3321 (const void __user *)
3322 (uintptr_t)tr->data.ptr.buffer,
3323 tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303324 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3325 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003326 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003327 return_error_param = -EFAULT;
3328 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003329 goto err_copy_data_failed;
3330 }
Todd Kjos5f245a92019-02-08 10:35:14 -08003331 if (binder_alloc_copy_user_to_buffer(
3332 &target_proc->alloc,
3333 t->buffer,
3334 ALIGN(tr->data_size, sizeof(void *)),
3335 (const void __user *)
3336 (uintptr_t)tr->data.ptr.offsets,
3337 tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303338 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3339 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003340 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003341 return_error_param = -EFAULT;
3342 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003343 goto err_copy_data_failed;
3344 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08003345 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3346 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3347 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003348 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003349 return_error_param = -EINVAL;
3350 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003351 goto err_bad_offset;
3352 }
Martijn Coenen79802402017-02-03 14:40:51 -08003353 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3354 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3355 proc->pid, thread->pid,
3356 (u64)extra_buffers_size);
3357 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003358 return_error_param = -EINVAL;
3359 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08003360 goto err_bad_offset;
3361 }
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003362 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3363 buffer_offset = off_start_offset;
3364 off_end_offset = off_start_offset + tr->offsets_size;
3365 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
Martijn Coenen0fa35bc2019-07-09 13:09:23 +02003366 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3367 ALIGN(secctx_sz, sizeof(u64));
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003368 off_min = 0;
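	/*
	 * Walk the offsets array and translate each object found in the
	 * copied buffer. off_min advances past each object so that
	 * overlapping or out-of-order offsets are rejected.
	 */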
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003369 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3370 buffer_offset += sizeof(binder_size_t)) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08003371 struct binder_object_header *hdr;
Todd Kjos06c24272019-02-08 10:35:15 -08003372 size_t object_size;
Todd Kjosa08646b2019-02-08 10:35:16 -08003373 struct binder_object object;
Todd Kjos06c24272019-02-08 10:35:15 -08003374 binder_size_t object_offset;
Seunghun Lee10f62862014-05-01 01:30:23 +09003375
Todd Kjos06c24272019-02-08 10:35:15 -08003376 binder_alloc_copy_from_buffer(&target_proc->alloc,
3377 &object_offset,
3378 t->buffer,
3379 buffer_offset,
3380 sizeof(object_offset));
Todd Kjosa08646b2019-02-08 10:35:16 -08003381 object_size = binder_get_object(target_proc, t->buffer,
3382 object_offset, &object);
Todd Kjos06c24272019-02-08 10:35:15 -08003383 if (object_size == 0 || object_offset < off_min) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08003384 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Todd Kjos06c24272019-02-08 10:35:15 -08003385 proc->pid, thread->pid,
3386 (u64)object_offset,
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08003387 (u64)off_min,
Martijn Coenenfeba3902017-02-03 14:40:45 -08003388 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003389 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003390 return_error_param = -EINVAL;
3391 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003392 goto err_bad_offset;
3393 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08003394
Todd Kjosa08646b2019-02-08 10:35:16 -08003395 hdr = &object.hdr;
Todd Kjos06c24272019-02-08 10:35:15 -08003396 off_min = object_offset + object_size;
Martijn Coenenfeba3902017-02-03 14:40:45 -08003397 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003398 case BINDER_TYPE_BINDER:
3399 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08003400 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09003401
Martijn Coenenfeba3902017-02-03 14:40:45 -08003402 fp = to_flat_binder_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08003403 ret = binder_translate_binder(fp, t, thread);
3404 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02003405 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003406 return_error_param = ret;
3407 return_error_line = __LINE__;
Martijn Coenena056af42017-02-03 14:40:49 -08003408 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003409 }
Todd Kjosa08646b2019-02-08 10:35:16 -08003410 binder_alloc_copy_to_buffer(&target_proc->alloc,
3411 t->buffer, object_offset,
3412 fp, sizeof(*fp));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003413 } break;
3414 case BINDER_TYPE_HANDLE:
3415 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08003416 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02003417
Martijn Coenenfeba3902017-02-03 14:40:45 -08003418 fp = to_flat_binder_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08003419 ret = binder_translate_handle(fp, t, thread);
3420 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003421 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003422 return_error_param = ret;
3423 return_error_line = __LINE__;
Martijn Coenena056af42017-02-03 14:40:49 -08003424 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003425 }
Todd Kjosa08646b2019-02-08 10:35:16 -08003426 binder_alloc_copy_to_buffer(&target_proc->alloc,
3427 t->buffer, object_offset,
3428 fp, sizeof(*fp));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003429 } break;
3430
3431 case BINDER_TYPE_FD: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08003432 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08003433 int target_fd = binder_translate_fd(fp->fd, t, thread,
3434 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003435
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003436 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003437 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003438 return_error_param = target_fd;
3439 return_error_line = __LINE__;
Martijn Coenena056af42017-02-03 14:40:49 -08003440 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003441 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08003442 fp->pad_binder = 0;
3443 fp->fd = target_fd;
Todd Kjosa08646b2019-02-08 10:35:16 -08003444 binder_alloc_copy_to_buffer(&target_proc->alloc,
3445 t->buffer, object_offset,
3446 fp, sizeof(*fp));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003447 } break;
Martijn Coenendef95c72017-02-03 14:40:52 -08003448 case BINDER_TYPE_FDA: {
Todd Kjosbaac2252019-02-08 10:35:17 -08003449 struct binder_object ptr_object;
3450 binder_size_t parent_offset;
Martijn Coenendef95c72017-02-03 14:40:52 -08003451 struct binder_fd_array_object *fda =
3452 to_binder_fd_array_object(hdr);
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003453 size_t num_valid = (buffer_offset - off_start_offset) /
3454 sizeof(binder_size_t);
Martijn Coenendef95c72017-02-03 14:40:52 -08003455 struct binder_buffer_object *parent =
Todd Kjosbaac2252019-02-08 10:35:17 -08003456 binder_validate_ptr(target_proc, t->buffer,
3457 &ptr_object, fda->parent,
3458 off_start_offset,
3459 &parent_offset,
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003460 num_valid);
Martijn Coenendef95c72017-02-03 14:40:52 -08003461 if (!parent) {
3462 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3463 proc->pid, thread->pid);
3464 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003465 return_error_param = -EINVAL;
3466 return_error_line = __LINE__;
Martijn Coenendef95c72017-02-03 14:40:52 -08003467 goto err_bad_parent;
3468 }
Todd Kjosbaac2252019-02-08 10:35:17 -08003469 if (!binder_validate_fixup(target_proc, t->buffer,
3470 off_start_offset,
3471 parent_offset,
3472 fda->parent_offset,
3473 last_fixup_obj_off,
Martijn Coenendef95c72017-02-03 14:40:52 -08003474 last_fixup_min_off)) {
3475 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3476 proc->pid, thread->pid);
3477 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003478 return_error_param = -EINVAL;
3479 return_error_line = __LINE__;
Martijn Coenendef95c72017-02-03 14:40:52 -08003480 goto err_bad_parent;
3481 }
3482 ret = binder_translate_fd_array(fda, parent, t, thread,
3483 in_reply_to);
3484 if (ret < 0) {
3485 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003486 return_error_param = ret;
3487 return_error_line = __LINE__;
Martijn Coenendef95c72017-02-03 14:40:52 -08003488 goto err_translate_failed;
3489 }
Todd Kjosbaac2252019-02-08 10:35:17 -08003490 last_fixup_obj_off = parent_offset;
Martijn Coenendef95c72017-02-03 14:40:52 -08003491 last_fixup_min_off =
3492 fda->parent_offset + sizeof(u32) * fda->num_fds;
3493 } break;
Martijn Coenen79802402017-02-03 14:40:51 -08003494 case BINDER_TYPE_PTR: {
3495 struct binder_buffer_object *bp =
3496 to_binder_buffer_object(hdr);
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003497 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3498 size_t num_valid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003499
Martijn Coenen79802402017-02-03 14:40:51 -08003500 if (bp->length > buf_left) {
3501 binder_user_error("%d:%d got transaction with too large buffer\n",
3502 proc->pid, thread->pid);
3503 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003504 return_error_param = -EINVAL;
3505 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08003506 goto err_bad_offset;
3507 }
Todd Kjos5f245a92019-02-08 10:35:14 -08003508 if (binder_alloc_copy_user_to_buffer(
3509 &target_proc->alloc,
3510 t->buffer,
3511 sg_buf_offset,
3512 (const void __user *)
3513 (uintptr_t)bp->buffer,
3514 bp->length)) {
Martijn Coenen79802402017-02-03 14:40:51 -08003515 binder_user_error("%d:%d got transaction with invalid sg buffer ptr\n",
3516 proc->pid, thread->pid);
Todd Kjos57ada2f2017-06-29 12:01:46 -07003517 return_error_param = -EFAULT;
Martijn Coenen79802402017-02-03 14:40:51 -08003518 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003519 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08003520 goto err_copy_data_failed;
3521 }
3522 /* Fixup buffer pointer to target proc address space */
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003523 bp->buffer = (uintptr_t)
3524 t->buffer->user_data + sg_buf_offset;
3525 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
Martijn Coenen79802402017-02-03 14:40:51 -08003526
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003527 num_valid = (buffer_offset - off_start_offset) /
3528 sizeof(binder_size_t);
Todd Kjosbaac2252019-02-08 10:35:17 -08003529 ret = binder_fixup_parent(t, thread, bp,
3530 off_start_offset,
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003531 num_valid,
Todd Kjosbaac2252019-02-08 10:35:17 -08003532 last_fixup_obj_off,
Martijn Coenen79802402017-02-03 14:40:51 -08003533 last_fixup_min_off);
3534 if (ret < 0) {
3535 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003536 return_error_param = ret;
3537 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08003538 goto err_translate_failed;
3539 }
Todd Kjosa08646b2019-02-08 10:35:16 -08003540 binder_alloc_copy_to_buffer(&target_proc->alloc,
3541 t->buffer, object_offset,
3542 bp, sizeof(*bp));
Todd Kjosbaac2252019-02-08 10:35:17 -08003543 last_fixup_obj_off = object_offset;
Martijn Coenen79802402017-02-03 14:40:51 -08003544 last_fixup_min_off = 0;
3545 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003546 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01003547 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08003548 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003549 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07003550 return_error_param = -EINVAL;
3551 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003552 goto err_bad_object_type;
3553 }
3554 }
Todd Kjosccae6f62017-06-29 12:01:48 -07003555 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
Todd Kjos673068e2017-06-29 12:02:03 -07003556 t->work.type = BINDER_WORK_TRANSACTION;
Todd Kjosccae6f62017-06-29 12:01:48 -07003557
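	/*
	 * Three delivery paths from here: a reply is queued directly to
	 * the thread that sent the original transaction; a synchronous
	 * transaction is pushed onto this thread's stack and handed to
	 * the target; a oneway transaction is handed to the target with
	 * no thread preference.
	 */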
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003558 if (reply) {
Martijn Coenen148ade22017-11-15 09:21:35 +01003559 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003560 binder_inner_proc_lock(target_proc);
3561 if (target_thread->is_dead) {
3562 binder_inner_proc_unlock(target_proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07003563 goto err_dead_proc_or_thread;
Martijn Coenen0b89d692017-06-29 12:02:06 -07003564 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003565 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003566 binder_pop_transaction_ilocked(target_thread, in_reply_to);
Martijn Coenen148ade22017-11-15 09:21:35 +01003567 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003568 binder_inner_proc_unlock(target_proc);
Martijn Coenen408c68b2017-08-31 10:04:19 +02003569 wake_up_interruptible_sync(&target_thread->wait);
Martijn Coenence388e02017-06-06 17:04:42 -07003570 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjosb6d282c2017-06-29 12:01:54 -07003571 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003572 } else if (!(t->flags & TF_ONE_WAY)) {
3573 BUG_ON(t->buffer->async_transaction != 0);
Martijn Coenen0b89d692017-06-29 12:02:06 -07003574 binder_inner_proc_lock(proc);
Martijn Coenen148ade22017-11-15 09:21:35 +01003575 /*
3576 * Defer the TRANSACTION_COMPLETE, so we don't return to
3577 * userspace immediately; this allows the target process to
3578 * immediately start processing this transaction, reducing
3579 * latency. We will then return the TRANSACTION_COMPLETE when
3580 * the target replies (or there is an error).
3581 */
3582 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003583 t->need_reply = 1;
3584 t->from_parent = thread->transaction_stack;
3585 thread->transaction_stack = t;
Martijn Coenen0b89d692017-06-29 12:02:06 -07003586 binder_inner_proc_unlock(proc);
Martijn Coenen408c68b2017-08-31 10:04:19 +02003587 if (!binder_proc_transaction(t, target_proc, target_thread)) {
Martijn Coenen0b89d692017-06-29 12:02:06 -07003588 binder_inner_proc_lock(proc);
3589 binder_pop_transaction_ilocked(thread, t);
3590 binder_inner_proc_unlock(proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07003591 goto err_dead_proc_or_thread;
3592 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003593 } else {
3594 BUG_ON(target_node == NULL);
3595 BUG_ON(t->buffer->async_transaction != 1);
Martijn Coenen148ade22017-11-15 09:21:35 +01003596 binder_enqueue_thread_work(thread, tcomplete);
Martijn Coenen408c68b2017-08-31 10:04:19 +02003597 if (!binder_proc_transaction(t, target_proc, NULL))
Todd Kjos7a4408c2017-06-29 12:01:57 -07003598 goto err_dead_proc_or_thread;
Riley Andrews00b40d62017-06-29 12:01:37 -07003599 }
Todd Kjos7a4408c2017-06-29 12:01:57 -07003600 if (target_thread)
3601 binder_thread_dec_tmpref(target_thread);
3602 binder_proc_dec_tmpref(target_proc);
Todd Kjos512cf462017-09-29 15:39:49 -07003603 if (target_node)
3604 binder_dec_node_tmpref(target_node);
Todd Kjosd99c7332017-06-29 12:01:53 -07003605 /*
3606 * write barrier to synchronize with initialization
3607 * of log entry
3608 */
3609 smp_wmb();
3610 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003611 return;
3612
Todd Kjos7a4408c2017-06-29 12:01:57 -07003613err_dead_proc_or_thread:
3614 return_error = BR_DEAD_REPLY;
3615 return_error_line = __LINE__;
Xu YiPingd53bebd2017-09-05 10:21:52 -07003616 binder_dequeue_work(proc, tcomplete);
Martijn Coenena056af42017-02-03 14:40:49 -08003617err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003618err_bad_object_type:
3619err_bad_offset:
Martijn Coenendef95c72017-02-03 14:40:52 -08003620err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003621err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003622 trace_binder_transaction_failed_buffer_release(t->buffer);
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003623 binder_transaction_buffer_release(target_proc, t->buffer,
3624 buffer_offset, true);
Todd Kjos512cf462017-09-29 15:39:49 -07003625 if (target_node)
3626 binder_dec_node_tmpref(target_node);
Todd Kjoseb349832017-06-29 12:01:56 -07003627 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003628 t->buffer->transaction = NULL;
Todd Kjos19c98722017-06-29 12:01:40 -07003629 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003630err_binder_alloc_buf_failed:
Todd Kjos8ae84852019-04-24 12:31:18 -07003631err_bad_extra_size:
Todd Kjos00bac142019-01-14 09:10:21 -08003632 if (secctx)
3633 security_release_secctx(secctx, secctx_sz);
3634err_get_secctx_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003635 kfree(tcomplete);
3636 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3637err_alloc_tcomplete_failed:
3638 kfree(t);
3639 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3640err_alloc_t_failed:
Sherry Yangb0cb2d82018-08-13 17:28:53 -07003641err_bad_todo_list:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003642err_bad_call_stack:
3643err_empty_call_stack:
3644err_dead_binder:
3645err_invalid_target_handle:
Todd Kjos7a4408c2017-06-29 12:01:57 -07003646 if (target_thread)
3647 binder_thread_dec_tmpref(target_thread);
3648 if (target_proc)
3649 binder_proc_dec_tmpref(target_proc);
Todd Kjos512cf462017-09-29 15:39:49 -07003650 if (target_node) {
Todd Kjoseb349832017-06-29 12:01:56 -07003651 binder_dec_node(target_node, 1, 0);
Todd Kjos512cf462017-09-29 15:39:49 -07003652 binder_dec_node_tmpref(target_node);
3653 }
Todd Kjoseb349832017-06-29 12:01:56 -07003654
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003655 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjos57ada2f2017-06-29 12:01:46 -07003656 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3657 proc->pid, thread->pid, return_error, return_error_param,
3658 (u64)tr->data_size, (u64)tr->offsets_size,
3659 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003660
3661 {
3662 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09003663
Todd Kjos57ada2f2017-06-29 12:01:46 -07003664 e->return_error = return_error;
3665 e->return_error_param = return_error_param;
3666 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003667 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3668 *fe = *e;
Todd Kjosd99c7332017-06-29 12:01:53 -07003669 /*
3670 * write barrier to synchronize with initialization
3671 * of log entry
3672 */
3673 smp_wmb();
3674 WRITE_ONCE(e->debug_id_done, t_debug_id);
3675 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003676 }
3677
Todd Kjos26549d12017-06-29 12:01:55 -07003678 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003679 if (in_reply_to) {
Martijn Coenence388e02017-06-06 17:04:42 -07003680 binder_restore_priority(current, in_reply_to->saved_priority);
Todd Kjos26549d12017-06-29 12:01:55 -07003681 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
Martijn Coenen148ade22017-11-15 09:21:35 +01003682 binder_enqueue_thread_work(thread, &thread->return_error.work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003683 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos26549d12017-06-29 12:01:55 -07003684 } else {
3685 thread->return_error.cmd = return_error;
Martijn Coenen148ade22017-11-15 09:21:35 +01003686 binder_enqueue_thread_work(thread, &thread->return_error.work);
Todd Kjos26549d12017-06-29 12:01:55 -07003687 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003688}
3689
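/**
 * binder_thread_write() - consume the write half of a BINDER_WRITE_READ
 * @proc:          binder_proc owning @thread
 * @thread:        calling binder_thread
 * @binder_buffer: userspace address of the BC_* command stream
 * @size:          size of the command stream in bytes
 * @consumed:      in/out offset into the stream, advanced per command
 *
 * Parses BC_* commands (a 32-bit code, each followed by a
 * command-specific payload) until the stream is exhausted or a return
 * error is pending on @thread.
 *
 * Return: 0 on success, -EFAULT or -EINVAL on malformed input.
 */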
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02003690static int binder_thread_write(struct binder_proc *proc,
3691 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003692 binder_uintptr_t binder_buffer, size_t size,
3693 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003694{
3695 uint32_t cmd;
Martijn Coenen342e5c92017-02-03 14:40:46 -08003696 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003697 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003698 void __user *ptr = buffer + *consumed;
3699 void __user *end = buffer + size;
3700
Todd Kjos26549d12017-06-29 12:01:55 -07003701 while (ptr < end && thread->return_error.cmd == BR_OK) {
Todd Kjos372e3142017-06-29 12:01:58 -07003702 int ret;
3703
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003704 if (get_user(cmd, (uint32_t __user *)ptr))
3705 return -EFAULT;
3706 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003707 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003708 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07003709 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3710 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3711 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003712 }
3713 switch (cmd) {
3714 case BC_INCREFS:
3715 case BC_ACQUIRE:
3716 case BC_RELEASE:
3717 case BC_DECREFS: {
3718 uint32_t target;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003719 const char *debug_string;
Todd Kjos372e3142017-06-29 12:01:58 -07003720 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3721 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3722 struct binder_ref_data rdata;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003723
3724 if (get_user(target, (uint32_t __user *)ptr))
3725 return -EFAULT;
Todd Kjosc44b1232017-06-29 12:01:43 -07003726
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003727 ptr += sizeof(uint32_t);
Todd Kjos372e3142017-06-29 12:01:58 -07003728 ret = -1;
3729 if (increment && !target) {
Todd Kjosc44b1232017-06-29 12:01:43 -07003730 struct binder_node *ctx_mgr_node;
Todd Kjosc44b1232017-06-29 12:01:43 -07003731 mutex_lock(&context->context_mgr_node_lock);
3732 ctx_mgr_node = context->binder_context_mgr_node;
Todd Kjos372e3142017-06-29 12:01:58 -07003733 if (ctx_mgr_node)
3734 ret = binder_inc_ref_for_node(
3735 proc, ctx_mgr_node,
3736 strong, NULL, &rdata);
Todd Kjosc44b1232017-06-29 12:01:43 -07003737 mutex_unlock(&context->context_mgr_node_lock);
3738 }
Todd Kjos372e3142017-06-29 12:01:58 -07003739 if (ret)
3740 ret = binder_update_ref_for_handle(
3741 proc, target, increment, strong,
3742 &rdata);
3743 if (!ret && rdata.desc != target) {
3744 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3745 proc->pid, thread->pid,
3746 target, rdata.desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003747 }
3748 switch (cmd) {
3749 case BC_INCREFS:
3750 debug_string = "IncRefs";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003751 break;
3752 case BC_ACQUIRE:
3753 debug_string = "Acquire";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003754 break;
3755 case BC_RELEASE:
3756 debug_string = "Release";
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003757 break;
3758 case BC_DECREFS:
3759 default:
3760 debug_string = "DecRefs";
Todd Kjos372e3142017-06-29 12:01:58 -07003761 break;
3762 }
3763 if (ret) {
3764 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3765 proc->pid, thread->pid, debug_string,
3766 strong, target, ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003767 break;
3768 }
3769 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjos372e3142017-06-29 12:01:58 -07003770 "%d:%d %s ref %d desc %d s %d w %d\n",
3771 proc->pid, thread->pid, debug_string,
3772 rdata.debug_id, rdata.desc, rdata.strong,
3773 rdata.weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003774 break;
3775 }
3776 case BC_INCREFS_DONE:
3777 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003778 binder_uintptr_t node_ptr;
3779 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003780 struct binder_node *node;
Todd Kjos673068e2017-06-29 12:02:03 -07003781 bool free_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003782
Arve Hjønnevågda498892014-02-21 14:40:26 -08003783 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003784 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003785 ptr += sizeof(binder_uintptr_t);
3786 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003787 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003788 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003789 node = binder_get_node(proc, node_ptr);
3790 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003791 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003792 proc->pid, thread->pid,
3793 cmd == BC_INCREFS_DONE ?
3794 "BC_INCREFS_DONE" :
3795 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003796 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003797 break;
3798 }
3799 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003800 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003801 proc->pid, thread->pid,
3802 cmd == BC_INCREFS_DONE ?
3803 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08003804 (u64)node_ptr, node->debug_id,
3805 (u64)cookie, (u64)node->cookie);
Todd Kjosadc18842017-06-29 12:01:59 -07003806 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003807 break;
3808 }
Todd Kjos673068e2017-06-29 12:02:03 -07003809 binder_node_inner_lock(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003810 if (cmd == BC_ACQUIRE_DONE) {
3811 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303812 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003813 proc->pid, thread->pid,
3814 node->debug_id);
Todd Kjos673068e2017-06-29 12:02:03 -07003815 binder_node_inner_unlock(node);
Todd Kjosadc18842017-06-29 12:01:59 -07003816 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003817 break;
3818 }
3819 node->pending_strong_ref = 0;
3820 } else {
3821 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05303822 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003823 proc->pid, thread->pid,
3824 node->debug_id);
Todd Kjos673068e2017-06-29 12:02:03 -07003825 binder_node_inner_unlock(node);
Todd Kjosadc18842017-06-29 12:01:59 -07003826 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003827 break;
3828 }
3829 node->pending_weak_ref = 0;
3830 }
Todd Kjos673068e2017-06-29 12:02:03 -07003831 free_node = binder_dec_node_nilocked(node,
3832 cmd == BC_ACQUIRE_DONE, 0);
3833 WARN_ON(free_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003834 binder_debug(BINDER_DEBUG_USER_REFS,
Todd Kjosadc18842017-06-29 12:01:59 -07003835 "%d:%d %s node %d ls %d lw %d tr %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003836 proc->pid, thread->pid,
3837 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Todd Kjosadc18842017-06-29 12:01:59 -07003838 node->debug_id, node->local_strong_refs,
3839 node->local_weak_refs, node->tmp_refs);
Todd Kjos673068e2017-06-29 12:02:03 -07003840 binder_node_inner_unlock(node);
Todd Kjosadc18842017-06-29 12:01:59 -07003841 binder_put_node(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003842 break;
3843 }
3844 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303845 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003846 return -EINVAL;
3847 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05303848 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003849 return -EINVAL;
3850
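/*
 * BC_FREE_BUFFER: userspace hands back a transaction buffer it received
 * with BR_TRANSACTION/BR_REPLY.  The payload is the buffer address from
 * the received transaction_data; note the stream is packed, so the
 * pointer follows the 32-bit code with no padding.  An illustrative,
 * non-normative userspace sketch (names are placeholders):
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t ptr;
 *	} __attribute__((packed)) fb = {
 *		.cmd = BC_FREE_BUFFER,
 *		.ptr = tr.data.ptr.buffer, // from the earlier BR_TRANSACTION
 *	};
 *	// fb is then queued into the next BINDER_WRITE_READ write_buffer
 */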
3851 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08003852 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003853 struct binder_buffer *buffer;
3854
Arve Hjønnevågda498892014-02-21 14:40:26 -08003855 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003856 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003857 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003858
Todd Kjos53d311cf2017-06-29 12:01:51 -07003859 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3860 data_ptr);
Todd Kjos553927d2018-11-06 15:55:32 -08003861 if (IS_ERR_OR_NULL(buffer)) {
3862 if (PTR_ERR(buffer) == -EPERM) {
3863 binder_user_error(
3864 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3865 proc->pid, thread->pid,
3866 (u64)data_ptr);
3867 } else {
3868 binder_user_error(
3869 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3870 proc->pid, thread->pid,
3871 (u64)data_ptr);
3872 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003873 break;
3874 }
3875 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003876 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3877 proc->pid, thread->pid, (u64)data_ptr,
3878 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003879 buffer->transaction ? "active" : "finished");
3880
Todd Kjos22068d42019-06-12 13:29:27 -07003881 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003882 if (buffer->transaction) {
3883 buffer->transaction->buffer = NULL;
3884 buffer->transaction = NULL;
3885 }
Todd Kjos22068d42019-06-12 13:29:27 -07003886 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003887 if (buffer->async_transaction && buffer->target_node) {
Todd Kjos72196392017-06-29 12:02:02 -07003888 struct binder_node *buf_node;
3889 struct binder_work *w;
3890
3891 buf_node = buffer->target_node;
Todd Kjos673068e2017-06-29 12:02:03 -07003892 binder_node_inner_lock(buf_node);
Todd Kjos72196392017-06-29 12:02:02 -07003893 BUG_ON(!buf_node->has_async_transaction);
3894 BUG_ON(buf_node->proc != proc);
Todd Kjos72196392017-06-29 12:02:02 -07003895 w = binder_dequeue_work_head_ilocked(
3896 &buf_node->async_todo);
Martijn Coenen3a6430c2017-08-31 10:04:29 +02003897 if (!w) {
Gustavo A. R. Silva197410a2018-01-23 12:04:27 -06003898 buf_node->has_async_transaction = false;
Martijn Coenen3a6430c2017-08-31 10:04:29 +02003899 } else {
Todd Kjos72196392017-06-29 12:02:02 -07003900 binder_enqueue_work_ilocked(
Martijn Coenen3a6430c2017-08-31 10:04:29 +02003901 w, &proc->todo);
3902 binder_wakeup_proc_ilocked(proc);
3903 }
Todd Kjos673068e2017-06-29 12:02:03 -07003904 binder_node_inner_unlock(buf_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003905 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003906 trace_binder_transaction_buffer_release(buffer);
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08003907 binder_transaction_buffer_release(proc, buffer, 0, false);
Todd Kjos19c98722017-06-29 12:01:40 -07003908 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003909 break;
3910 }
3911
Martijn Coenen79802402017-02-03 14:40:51 -08003912 case BC_TRANSACTION_SG:
3913 case BC_REPLY_SG: {
3914 struct binder_transaction_data_sg tr;
3915
3916 if (copy_from_user(&tr, ptr, sizeof(tr)))
3917 return -EFAULT;
3918 ptr += sizeof(tr);
3919 binder_transaction(proc, thread, &tr.transaction_data,
3920 cmd == BC_REPLY_SG, tr.buffers_size);
3921 break;
3922 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003923 case BC_TRANSACTION:
3924 case BC_REPLY: {
3925 struct binder_transaction_data tr;
3926
3927 if (copy_from_user(&tr, ptr, sizeof(tr)))
3928 return -EFAULT;
3929 ptr += sizeof(tr);
Martijn Coenen4bfac802017-02-03 14:40:50 -08003930 binder_transaction(proc, thread, &tr,
3931 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003932 break;
3933 }
3934
3935 case BC_REGISTER_LOOPER:
3936 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303937 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003938 proc->pid, thread->pid);
Todd Kjosb3e68612017-06-29 12:02:07 -07003939 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003940 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3941 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303942 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003943 proc->pid, thread->pid);
3944 } else if (proc->requested_threads == 0) {
3945 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303946 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003947 proc->pid, thread->pid);
3948 } else {
3949 proc->requested_threads--;
3950 proc->requested_threads_started++;
3951 }
3952 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
Todd Kjosb3e68612017-06-29 12:02:07 -07003953 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003954 break;
3955 case BC_ENTER_LOOPER:
3956 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303957 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003958 proc->pid, thread->pid);
3959 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3960 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05303961 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003962 proc->pid, thread->pid);
3963 }
3964 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3965 break;
3966 case BC_EXIT_LOOPER:
3967 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05303968 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003969 proc->pid, thread->pid);
3970 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3971 break;
3972
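/*
 * BC_REQUEST_DEATH_NOTIFICATION / BC_CLEAR_DEATH_NOTIFICATION: payload
 * is a 32-bit handle followed by a userspace cookie.  The cookie is
 * echoed back in BR_DEAD_BINDER and BR_CLEAR_DEATH_NOTIFICATION_DONE so
 * userspace can match the notification to the proxy that registered it.
 */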
3973 case BC_REQUEST_DEATH_NOTIFICATION:
3974 case BC_CLEAR_DEATH_NOTIFICATION: {
3975 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003976 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003977 struct binder_ref *ref;
Todd Kjos2c1838d2017-06-29 12:02:08 -07003978 struct binder_ref_death *death = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003979
3980 if (get_user(target, (uint32_t __user *)ptr))
3981 return -EFAULT;
3982 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08003983 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003984 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08003985 ptr += sizeof(binder_uintptr_t);
Todd Kjos2c1838d2017-06-29 12:02:08 -07003986 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3987 /*
3988 * Allocate memory for death notification
3989 * before taking lock
3990 */
3991 death = kzalloc(sizeof(*death), GFP_KERNEL);
3992 if (death == NULL) {
3993 WARN_ON(thread->return_error.cmd !=
3994 BR_OK);
3995 thread->return_error.cmd = BR_ERROR;
Martijn Coenen148ade22017-11-15 09:21:35 +01003996 binder_enqueue_thread_work(
3997 thread,
3998 &thread->return_error.work);
Todd Kjos2c1838d2017-06-29 12:02:08 -07003999 binder_debug(
4000 BINDER_DEBUG_FAILED_TRANSACTION,
4001 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4002 proc->pid, thread->pid);
4003 break;
4004 }
4005 }
4006 binder_proc_lock(proc);
4007 ref = binder_get_ref_olocked(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004008 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304009 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004010 proc->pid, thread->pid,
4011 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4012 "BC_REQUEST_DEATH_NOTIFICATION" :
4013 "BC_CLEAR_DEATH_NOTIFICATION",
4014 target);
Todd Kjos2c1838d2017-06-29 12:02:08 -07004015 binder_proc_unlock(proc);
4016 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004017 break;
4018 }
4019
4020 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004021 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004022 proc->pid, thread->pid,
4023 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4024 "BC_REQUEST_DEATH_NOTIFICATION" :
4025 "BC_CLEAR_DEATH_NOTIFICATION",
Todd Kjos372e3142017-06-29 12:01:58 -07004026 (u64)cookie, ref->data.debug_id,
4027 ref->data.desc, ref->data.strong,
4028 ref->data.weak, ref->node->debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004029
Martijn Coenenab51ec62017-06-29 12:02:10 -07004030 binder_node_lock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004031 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4032 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304033 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004034 proc->pid, thread->pid);
Martijn Coenenab51ec62017-06-29 12:02:10 -07004035 binder_node_unlock(ref->node);
Todd Kjos2c1838d2017-06-29 12:02:08 -07004036 binder_proc_unlock(proc);
4037 kfree(death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004038 break;
4039 }
4040 binder_stats_created(BINDER_STAT_DEATH);
4041 INIT_LIST_HEAD(&death->work.entry);
4042 death->cookie = cookie;
4043 ref->death = death;
4044 if (ref->node->proc == NULL) {
4045 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
Martijn Coenenbb745622017-08-31 10:04:28 +02004046
4047 binder_inner_proc_lock(proc);
4048 binder_enqueue_work_ilocked(
4049 &ref->death->work, &proc->todo);
4050 binder_wakeup_proc_ilocked(proc);
4051 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004052 }
4053 } else {
4054 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304055 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004056 proc->pid, thread->pid);
Todd Kjos673068e2017-06-29 12:02:03 -07004057 binder_node_unlock(ref->node);
Todd Kjos2c1838d2017-06-29 12:02:08 -07004058 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004059 break;
4060 }
4061 death = ref->death;
4062 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08004063 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004064 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004065 (u64)death->cookie,
4066 (u64)cookie);
Todd Kjos673068e2017-06-29 12:02:03 -07004067 binder_node_unlock(ref->node);
Todd Kjos2c1838d2017-06-29 12:02:08 -07004068 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004069 break;
4070 }
4071 ref->death = NULL;
Todd Kjos72196392017-06-29 12:02:02 -07004072 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004073 if (list_empty(&death->work.entry)) {
4074 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos72196392017-06-29 12:02:02 -07004075 if (thread->looper &
4076 (BINDER_LOOPER_STATE_REGISTERED |
4077 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen148ade22017-11-15 09:21:35 +01004078 binder_enqueue_thread_work_ilocked(
4079 thread,
4080 &death->work);
Todd Kjos72196392017-06-29 12:02:02 -07004081 else {
4082 binder_enqueue_work_ilocked(
4083 &death->work,
4084 &proc->todo);
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004085 binder_wakeup_proc_ilocked(
Martijn Coenen408c68b2017-08-31 10:04:19 +02004086 proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004087 }
4088 } else {
4089 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4090 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4091 }
Todd Kjos72196392017-06-29 12:02:02 -07004092 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004093 }
Martijn Coenenab51ec62017-06-29 12:02:10 -07004094 binder_node_unlock(ref->node);
Todd Kjos2c1838d2017-06-29 12:02:08 -07004095 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004096 } break;
4097 case BC_DEAD_BINDER_DONE: {
4098 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08004099 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004100 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09004101
Arve Hjønnevågda498892014-02-21 14:40:26 -08004102 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004103 return -EFAULT;
4104
Lisa Du7a64cd82016-02-17 09:32:52 +08004105 ptr += sizeof(cookie);
Todd Kjos72196392017-06-29 12:02:02 -07004106 binder_inner_proc_lock(proc);
4107 list_for_each_entry(w, &proc->delivered_death,
4108 entry) {
4109 struct binder_ref_death *tmp_death =
4110 container_of(w,
4111 struct binder_ref_death,
4112 work);
Seunghun Lee10f62862014-05-01 01:30:23 +09004113
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004114 if (tmp_death->cookie == cookie) {
4115 death = tmp_death;
4116 break;
4117 }
4118 }
4119 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Todd Kjos8ca86f12018-02-07 13:57:37 -08004120 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08004121 proc->pid, thread->pid, (u64)cookie,
4122 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004123 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08004124 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4125 proc->pid, thread->pid, (u64)cookie);
Todd Kjos72196392017-06-29 12:02:02 -07004126 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004127 break;
4128 }
Todd Kjos72196392017-06-29 12:02:02 -07004129 binder_dequeue_work_ilocked(&death->work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004130 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4131 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
Todd Kjos72196392017-06-29 12:02:02 -07004132 if (thread->looper &
4133 (BINDER_LOOPER_STATE_REGISTERED |
4134 BINDER_LOOPER_STATE_ENTERED))
Martijn Coenen148ade22017-11-15 09:21:35 +01004135 binder_enqueue_thread_work_ilocked(
4136 thread, &death->work);
Todd Kjos72196392017-06-29 12:02:02 -07004137 else {
4138 binder_enqueue_work_ilocked(
4139 &death->work,
4140 &proc->todo);
Martijn Coenen408c68b2017-08-31 10:04:19 +02004141 binder_wakeup_proc_ilocked(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004142 }
4143 }
Todd Kjos72196392017-06-29 12:02:02 -07004144 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004145 } break;
4146
4147 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304148 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004149 proc->pid, thread->pid, cmd);
4150 return -EINVAL;
4151 }
4152 *consumed = ptr - buffer;
4153 }
4154 return 0;
4155}
4156
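/* Account a BR_* return code in the global, per-proc and per-thread stats. */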
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02004157static void binder_stat_br(struct binder_proc *proc,
4158 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004159{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004160 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004161 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07004162 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4163 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4164 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004165 }
4166}
4167
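/*
 * Write one BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS command plus the
 * node's ptr/cookie pair to the userspace read buffer, advancing *ptrp
 * past what was written.
 */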
Todd Kjos26b47d82017-06-29 12:01:47 -07004168static int binder_put_node_cmd(struct binder_proc *proc,
4169 struct binder_thread *thread,
4170 void __user **ptrp,
4171 binder_uintptr_t node_ptr,
4172 binder_uintptr_t node_cookie,
4173 int node_debug_id,
4174 uint32_t cmd, const char *cmd_name)
4175{
4176 void __user *ptr = *ptrp;
4177
4178 if (put_user(cmd, (uint32_t __user *)ptr))
4179 return -EFAULT;
4180 ptr += sizeof(uint32_t);
4181
4182 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4183 return -EFAULT;
4184 ptr += sizeof(binder_uintptr_t);
4185
4186 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4187 return -EFAULT;
4188 ptr += sizeof(binder_uintptr_t);
4189
4190 binder_stat_br(proc, thread, cmd);
4191 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4192 proc->pid, thread->pid, cmd_name, node_debug_id,
4193 (u64)node_ptr, (u64)node_cookie);
4194
4195 *ptrp = ptr;
4196 return 0;
4197}
4198
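/**
 * binder_wait_for_work() - sleep until thread (or process) work arrives
 * @thread:       waiting binder_thread
 * @do_proc_work: also accept work queued on the whole process
 *
 * Classic prepare_to_wait()/schedule()/finish_wait() loop under the
 * inner lock.  When @do_proc_work is set the thread parks itself on
 * proc->waiting_threads so binder_wakeup_proc_ilocked() can select it.
 * The sleep is bracketed by freezer_do_not_count()/freezer_count() so
 * the freezer need not wait for threads blocked here.
 *
 * Return: 0 when work is available, -ERESTARTSYS on a pending signal.
 */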
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004199static int binder_wait_for_work(struct binder_thread *thread,
4200 bool do_proc_work)
4201{
4202 DEFINE_WAIT(wait);
4203 struct binder_proc *proc = thread->proc;
4204 int ret = 0;
4205
4206 freezer_do_not_count();
4207 binder_inner_proc_lock(proc);
4208 for (;;) {
4209 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4210 if (binder_has_work_ilocked(thread, do_proc_work))
4211 break;
4212 if (do_proc_work)
4213 list_add(&thread->waiting_thread_node,
4214 &proc->waiting_threads);
4215 binder_inner_proc_unlock(proc);
4216 schedule();
4217 binder_inner_proc_lock(proc);
4218 list_del_init(&thread->waiting_thread_node);
4219 if (signal_pending(current)) {
4220 ret = -ERESTARTSYS;
4221 break;
4222 }
4223 }
4224 finish_wait(&thread->wait, &wait);
4225 binder_inner_proc_unlock(proc);
4226 freezer_count();
4227
4228 return ret;
4229}
4230
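/**
 * binder_thread_read() - fill the read half of a BINDER_WRITE_READ
 * @proc:          binder_proc owning @thread
 * @thread:        calling binder_thread
 * @binder_buffer: userspace address of the read buffer
 * @size:          size of the read buffer in bytes
 * @consumed:      in/out count of bytes already filled
 * @non_block:     when set, return -EAGAIN rather than sleeping
 *
 * Emits BR_* items from thread->todo (and from proc->todo when the
 * thread may handle process work).  The BR_NOOP written at the start of
 * the buffer may be replaced by BR_SPAWN_LOOPER on the way out if the
 * process should start another looper thread.
 */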
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004231static int binder_thread_read(struct binder_proc *proc,
4232 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004233 binder_uintptr_t binder_buffer, size_t size,
4234 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004235{
Arve Hjønnevågda498892014-02-21 14:40:26 -08004236 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004237 void __user *ptr = buffer + *consumed;
4238 void __user *end = buffer + size;
4239
4240 int ret = 0;
4241 int wait_for_proc_work;
4242
4243 if (*consumed == 0) {
4244 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4245 return -EFAULT;
4246 ptr += sizeof(uint32_t);
4247 }
4248
4249retry:
Martijn Coenen0b89d692017-06-29 12:02:06 -07004250 binder_inner_proc_lock(proc);
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004251 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
Martijn Coenen0b89d692017-06-29 12:02:06 -07004252 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004253
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004254 thread->looper |= BINDER_LOOPER_STATE_WAITING;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004255
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004256 trace_binder_wait_for_work(wait_for_proc_work,
4257 !!thread->transaction_stack,
Todd Kjos72196392017-06-29 12:02:02 -07004258 !binder_worklist_empty(proc, &thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004259 if (wait_for_proc_work) {
4260 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4261 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05304262 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004263 proc->pid, thread->pid, thread->looper);
4264 wait_event_interruptible(binder_user_error_wait,
4265 binder_stop_on_user_error < 2);
4266 }
Martijn Coenence388e02017-06-06 17:04:42 -07004267 binder_restore_priority(current, proc->default_priority);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004268 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004269
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004270 if (non_block) {
4271 if (!binder_has_work(thread, wait_for_proc_work))
4272 ret = -EAGAIN;
4273 } else {
4274 ret = binder_wait_for_work(thread, wait_for_proc_work);
4275 }
4276
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004277 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4278
4279 if (ret)
4280 return ret;
4281
4282 while (1) {
4283 uint32_t cmd;
Todd Kjos00bac142019-01-14 09:10:21 -08004284 struct binder_transaction_data_secctx tr;
4285 struct binder_transaction_data *trd = &tr.transaction_data;
Todd Kjos72196392017-06-29 12:02:02 -07004286 struct binder_work *w = NULL;
4287 struct list_head *list = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004288 struct binder_transaction *t = NULL;
Todd Kjos7a4408c2017-06-29 12:01:57 -07004289 struct binder_thread *t_from;
Todd Kjos00bac142019-01-14 09:10:21 -08004290 size_t trsize = sizeof(*trd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004291
Todd Kjosed297212017-06-29 12:02:01 -07004292 binder_inner_proc_lock(proc);
Todd Kjos72196392017-06-29 12:02:02 -07004293 if (!binder_worklist_empty_ilocked(&thread->todo))
4294 list = &thread->todo;
4295 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4296 wait_for_proc_work)
4297 list = &proc->todo;
4298 else {
4299 binder_inner_proc_unlock(proc);
4300
Dmitry Voytik395262a2014-09-08 18:16:34 +04004301 /* no data added */
Todd Kjos08dabce2017-06-29 12:01:49 -07004302 if (ptr - buffer == 4 && !thread->looper_need_return)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004303 goto retry;
4304 break;
4305 }
4306
Todd Kjosed297212017-06-29 12:02:01 -07004307 if (end - ptr < sizeof(tr) + 4) {
4308 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004309 break;
Todd Kjosed297212017-06-29 12:02:01 -07004310 }
Todd Kjos72196392017-06-29 12:02:02 -07004311 w = binder_dequeue_work_head_ilocked(list);
Martijn Coenen148ade22017-11-15 09:21:35 +01004312 if (binder_worklist_empty_ilocked(&thread->todo))
4313 thread->process_todo = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004314
4315 switch (w->type) {
4316 case BINDER_WORK_TRANSACTION: {
Todd Kjosed297212017-06-29 12:02:01 -07004317 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004318 t = container_of(w, struct binder_transaction, work);
4319 } break;
Todd Kjos26549d12017-06-29 12:01:55 -07004320 case BINDER_WORK_RETURN_ERROR: {
4321 struct binder_error *e = container_of(
4322 w, struct binder_error, work);
4323
4324 WARN_ON(e->cmd == BR_OK);
Todd Kjosed297212017-06-29 12:02:01 -07004325 binder_inner_proc_unlock(proc);
Todd Kjos26549d12017-06-29 12:01:55 -07004326 if (put_user(e->cmd, (uint32_t __user *)ptr))
4327 return -EFAULT;
宋金时838d5562018-05-10 02:05:03 +00004328 cmd = e->cmd;
Todd Kjos26549d12017-06-29 12:01:55 -07004329 e->cmd = BR_OK;
4330 ptr += sizeof(uint32_t);
4331
宋金时838d5562018-05-10 02:05:03 +00004332 binder_stat_br(proc, thread, cmd);
Todd Kjos26549d12017-06-29 12:01:55 -07004333 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004334 case BINDER_WORK_TRANSACTION_COMPLETE: {
Todd Kjosed297212017-06-29 12:02:01 -07004335 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004336 cmd = BR_TRANSACTION_COMPLETE;
Todd Kjos524ad002019-06-21 10:54:15 -07004337 kfree(w);
4338 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004339 if (put_user(cmd, (uint32_t __user *)ptr))
4340 return -EFAULT;
4341 ptr += sizeof(uint32_t);
4342
4343 binder_stat_br(proc, thread, cmd);
4344 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304345 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004346 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004347 } break;
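/*
 * BINDER_WORK_NODE: reconcile the node's current ref state with what
 * userspace has already been told, emitting BR_INCREFS/BR_ACQUIRE/
 * BR_RELEASE/BR_DECREFS via binder_put_node_cmd() as needed, and
 * freeing the node once no refs of either strength remain.
 */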
4348 case BINDER_WORK_NODE: {
4349 struct binder_node *node = container_of(w, struct binder_node, work);
Todd Kjos26b47d82017-06-29 12:01:47 -07004350 int strong, weak;
4351 binder_uintptr_t node_ptr = node->ptr;
4352 binder_uintptr_t node_cookie = node->cookie;
4353 int node_debug_id = node->debug_id;
4354 int has_weak_ref;
4355 int has_strong_ref;
4356 void __user *orig_ptr = ptr;
Seunghun Lee10f62862014-05-01 01:30:23 +09004357
Todd Kjos26b47d82017-06-29 12:01:47 -07004358 BUG_ON(proc != node->proc);
4359 strong = node->internal_strong_refs ||
4360 node->local_strong_refs;
4361 weak = !hlist_empty(&node->refs) ||
Todd Kjosadc18842017-06-29 12:01:59 -07004362 node->local_weak_refs ||
4363 node->tmp_refs || strong;
Todd Kjos26b47d82017-06-29 12:01:47 -07004364 has_strong_ref = node->has_strong_ref;
4365 has_weak_ref = node->has_weak_ref;
4366
4367 if (weak && !has_weak_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004368 node->has_weak_ref = 1;
4369 node->pending_weak_ref = 1;
4370 node->local_weak_refs++;
Todd Kjos26b47d82017-06-29 12:01:47 -07004371 }
4372 if (strong && !has_strong_ref) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004373 node->has_strong_ref = 1;
4374 node->pending_strong_ref = 1;
4375 node->local_strong_refs++;
Todd Kjos26b47d82017-06-29 12:01:47 -07004376 }
4377 if (!strong && has_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004378 node->has_strong_ref = 0;
Todd Kjos26b47d82017-06-29 12:01:47 -07004379 if (!weak && has_weak_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004380 node->has_weak_ref = 0;
Todd Kjos26b47d82017-06-29 12:01:47 -07004381 if (!weak && !strong) {
4382 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4383 "%d:%d node %d u%016llx c%016llx deleted\n",
4384 proc->pid, thread->pid,
4385 node_debug_id,
4386 (u64)node_ptr,
4387 (u64)node_cookie);
4388 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjosed297212017-06-29 12:02:01 -07004389 binder_inner_proc_unlock(proc);
Todd Kjos673068e2017-06-29 12:02:03 -07004390 binder_node_lock(node);
4391 /*
4392 * Acquire the node lock before freeing the
4393 * node to serialize with other threads that
4394 * may have been holding the node lock while
4395 * decrementing this node (avoids race where
4396 * this thread frees while the other thread
4397 * is unlocking the node after the final
4398 * decrement)
4399 */
4400 binder_node_unlock(node);
Todd Kjosed297212017-06-29 12:02:01 -07004401 binder_free_node(node);
4402 } else
4403 binder_inner_proc_unlock(proc);
4404
Todd Kjos26b47d82017-06-29 12:01:47 -07004405 if (weak && !has_weak_ref)
4406 ret = binder_put_node_cmd(
4407 proc, thread, &ptr, node_ptr,
4408 node_cookie, node_debug_id,
4409 BR_INCREFS, "BR_INCREFS");
4410 if (!ret && strong && !has_strong_ref)
4411 ret = binder_put_node_cmd(
4412 proc, thread, &ptr, node_ptr,
4413 node_cookie, node_debug_id,
4414 BR_ACQUIRE, "BR_ACQUIRE");
4415 if (!ret && !strong && has_strong_ref)
4416 ret = binder_put_node_cmd(
4417 proc, thread, &ptr, node_ptr,
4418 node_cookie, node_debug_id,
4419 BR_RELEASE, "BR_RELEASE");
4420 if (!ret && !weak && has_weak_ref)
4421 ret = binder_put_node_cmd(
4422 proc, thread, &ptr, node_ptr,
4423 node_cookie, node_debug_id,
4424 BR_DECREFS, "BR_DECREFS");
4425 if (orig_ptr == ptr)
4426 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4427 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4428 proc->pid, thread->pid,
4429 node_debug_id,
4430 (u64)node_ptr,
4431 (u64)node_cookie);
4432 if (ret)
4433 return ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004434 } break;
4435 case BINDER_WORK_DEAD_BINDER:
4436 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4437 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4438 struct binder_ref_death *death;
4439 uint32_t cmd;
Martijn Coenenab51ec62017-06-29 12:02:10 -07004440 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004441
4442 death = container_of(w, struct binder_ref_death, work);
4443 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4444 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4445 else
4446 cmd = BR_DEAD_BINDER;
Martijn Coenenab51ec62017-06-29 12:02:10 -07004447 cookie = death->cookie;
4448
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004449 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004450 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004451 proc->pid, thread->pid,
4452 cmd == BR_DEAD_BINDER ?
4453 "BR_DEAD_BINDER" :
4454 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Martijn Coenenab51ec62017-06-29 12:02:10 -07004455 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004456 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
Martijn Coenenab51ec62017-06-29 12:02:10 -07004457 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004458 kfree(death);
4459 binder_stats_deleted(BINDER_STAT_DEATH);
Todd Kjosed297212017-06-29 12:02:01 -07004460 } else {
Todd Kjos72196392017-06-29 12:02:02 -07004461 binder_enqueue_work_ilocked(
4462 w, &proc->delivered_death);
Todd Kjosed297212017-06-29 12:02:01 -07004463 binder_inner_proc_unlock(proc);
4464 }
Martijn Coenenab51ec62017-06-29 12:02:10 -07004465 if (put_user(cmd, (uint32_t __user *)ptr))
4466 return -EFAULT;
4467 ptr += sizeof(uint32_t);
4468 if (put_user(cookie,
4469 (binder_uintptr_t __user *)ptr))
4470 return -EFAULT;
4471 ptr += sizeof(binder_uintptr_t);
4472 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004473 if (cmd == BR_DEAD_BINDER)
4474 goto done; /* DEAD_BINDER notifications can cause transactions */
4475 } break;
4476 }
4477
4478 if (!t)
4479 continue;
4480
4481 BUG_ON(t->buffer == NULL);
4482 if (t->buffer->target_node) {
4483 struct binder_node *target_node = t->buffer->target_node;
Martijn Coenence388e02017-06-06 17:04:42 -07004484 struct binder_priority node_prio;
Seunghun Lee10f62862014-05-01 01:30:23 +09004485
Todd Kjos00bac142019-01-14 09:10:21 -08004486 trd->target.ptr = target_node->ptr;
4487 trd->cookie = target_node->cookie;
Martijn Coenence388e02017-06-06 17:04:42 -07004488 node_prio.sched_policy = target_node->sched_policy;
4489 node_prio.prio = target_node->min_priority;
4490 binder_transaction_priority(current, t, node_prio,
4491 target_node->inherit_rt);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004492 cmd = BR_TRANSACTION;
4493 } else {
Todd Kjos00bac142019-01-14 09:10:21 -08004494 trd->target.ptr = 0;
4495 trd->cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004496 cmd = BR_REPLY;
4497 }
Todd Kjos00bac142019-01-14 09:10:21 -08004498 trd->code = t->code;
4499 trd->flags = t->flags;
4500 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004501
Todd Kjos7a4408c2017-06-29 12:01:57 -07004502 t_from = binder_get_txn_from(t);
4503 if (t_from) {
4504 struct task_struct *sender = t_from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09004505
Todd Kjos00bac142019-01-14 09:10:21 -08004506 trd->sender_pid =
4507 task_tgid_nr_ns(sender,
4508 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004509 } else {
Todd Kjos00bac142019-01-14 09:10:21 -08004510 trd->sender_pid = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004511 }
4512
Todd Kjos00bac142019-01-14 09:10:21 -08004513 trd->data_size = t->buffer->data_size;
4514 trd->offsets_size = t->buffer->offsets_size;
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08004515 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
Todd Kjos00bac142019-01-14 09:10:21 -08004516 trd->data.ptr.offsets = trd->data.ptr.buffer +
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004517 ALIGN(t->buffer->data_size,
4518 sizeof(void *));
4519
Todd Kjos00bac142019-01-14 09:10:21 -08004520 tr.secctx = t->security_ctx;
4521 if (t->security_ctx) {
4522 cmd = BR_TRANSACTION_SEC_CTX;
4523 trsize = sizeof(tr);
4524 }
Todd Kjos7a4408c2017-06-29 12:01:57 -07004525 if (put_user(cmd, (uint32_t __user *)ptr)) {
4526 if (t_from)
4527 binder_thread_dec_tmpref(t_from);
Martijn Coenenfb2c4452017-11-13 10:06:08 +01004528
4529 binder_cleanup_transaction(t, "put_user failed",
4530 BR_FAILED_REPLY);
4531
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004532 return -EFAULT;
Todd Kjos7a4408c2017-06-29 12:01:57 -07004533 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004534 ptr += sizeof(uint32_t);
Todd Kjos00bac142019-01-14 09:10:21 -08004535 if (copy_to_user(ptr, &tr, trsize)) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07004536 if (t_from)
4537 binder_thread_dec_tmpref(t_from);
Martijn Coenenfb2c4452017-11-13 10:06:08 +01004538
4539 binder_cleanup_transaction(t, "copy_to_user failed",
4540 BR_FAILED_REPLY);
4541
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004542 return -EFAULT;
Todd Kjos7a4408c2017-06-29 12:01:57 -07004543 }
Todd Kjos00bac142019-01-14 09:10:21 -08004544 ptr += trsize;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004545
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004546 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004547 binder_stat_br(proc, thread, cmd);
4548 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004549 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004550 proc->pid, thread->pid,
4551 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
Todd Kjos00bac142019-01-14 09:10:21 -08004552 (cmd == BR_TRANSACTION_SEC_CTX) ?
4553 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
Todd Kjos7a4408c2017-06-29 12:01:57 -07004554 t->debug_id, t_from ? t_from->proc->pid : 0,
4555 t_from ? t_from->pid : 0, cmd,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004556 t->buffer->data_size, t->buffer->offsets_size,
Todd Kjos00bac142019-01-14 09:10:21 -08004557 (u64)trd->data.ptr.buffer,
4558 (u64)trd->data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004559
Todd Kjos7a4408c2017-06-29 12:01:57 -07004560 if (t_from)
4561 binder_thread_dec_tmpref(t_from);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004562 t->buffer->allow_user_free = 1;
Todd Kjos00bac142019-01-14 09:10:21 -08004563 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
Martijn Coenen0b89d692017-06-29 12:02:06 -07004564 binder_inner_proc_lock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004565 t->to_parent = thread->transaction_stack;
4566 t->to_thread = thread;
4567 thread->transaction_stack = t;
Martijn Coenen0b89d692017-06-29 12:02:06 -07004568 binder_inner_proc_unlock(thread->proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004569 } else {
Todd Kjosb6d282c2017-06-29 12:01:54 -07004570 binder_free_transaction(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004571 }
4572 break;
4573 }
4574
4575done:
4576
4577 *consumed = ptr - buffer;
Todd Kjosb3e68612017-06-29 12:02:07 -07004578 binder_inner_proc_lock(proc);
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004579 if (proc->requested_threads == 0 &&
4580 list_empty(&thread->proc->waiting_threads) &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004581 proc->requested_threads_started < proc->max_threads &&
4582 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4583 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
4584 * spawn a new thread if we leave this out */) {
4585 proc->requested_threads++;
Todd Kjosb3e68612017-06-29 12:02:07 -07004586 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004587 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304588 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004589 proc->pid, thread->pid);
4590 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4591 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07004592 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Todd Kjosb3e68612017-06-29 12:02:07 -07004593 } else
4594 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004595 return 0;
4596}
4597
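/*
 * Flush @list, discarding work that can no longer be delivered: pending
 * transactions are cleaned up with BR_DEAD_REPLY, TRANSACTION_COMPLETE
 * and death-notification records are freed, and undelivered errors are
 * logged.
 */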
Todd Kjos72196392017-06-29 12:02:02 -07004598static void binder_release_work(struct binder_proc *proc,
4599 struct list_head *list)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004600{
4601 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09004602
Todd Kjos72196392017-06-29 12:02:02 -07004603 while (1) {
4604 w = binder_dequeue_work_head(proc, list);
4605 if (!w)
4606 return;
4607
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004608 switch (w->type) {
4609 case BINDER_WORK_TRANSACTION: {
4610 struct binder_transaction *t;
4611
4612 t = container_of(w, struct binder_transaction, work);
Martijn Coenenfb2c4452017-11-13 10:06:08 +01004613
4614 binder_cleanup_transaction(t, "process died.",
4615 BR_DEAD_REPLY);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004616 } break;
Todd Kjos26549d12017-06-29 12:01:55 -07004617 case BINDER_WORK_RETURN_ERROR: {
4618 struct binder_error *e = container_of(
4619 w, struct binder_error, work);
4620
4621 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4622 "undelivered TRANSACTION_ERROR: %u\n",
4623 e->cmd);
4624 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004625 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004626 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304627 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004628 kfree(w);
4629 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4630 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004631 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4632 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4633 struct binder_ref_death *death;
4634
4635 death = container_of(w, struct binder_ref_death, work);
4636 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08004637 "undelivered death notification, %016llx\n",
4638 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004639 kfree(death);
4640 binder_stats_deleted(BINDER_STAT_DEATH);
4641 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004642 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05304643 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07004644 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004645 break;
4646 }
4647 }
4648
4649}
4650
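/*
 * Find current's binder_thread in proc->threads (an rbtree keyed by
 * pid).  If absent and @new_thread is non-NULL, initialize and insert
 * @new_thread instead of returning NULL.  Called with the inner lock
 * held.
 */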
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004651static struct binder_thread *binder_get_thread_ilocked(
4652 struct binder_proc *proc, struct binder_thread *new_thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004653{
4654 struct binder_thread *thread = NULL;
4655 struct rb_node *parent = NULL;
4656 struct rb_node **p = &proc->threads.rb_node;
4657
4658 while (*p) {
4659 parent = *p;
4660 thread = rb_entry(parent, struct binder_thread, rb_node);
4661
4662 if (current->pid < thread->pid)
4663 p = &(*p)->rb_left;
4664 else if (current->pid > thread->pid)
4665 p = &(*p)->rb_right;
4666 else
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004667 return thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004668 }
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004669 if (!new_thread)
4670 return NULL;
4671 thread = new_thread;
4672 binder_stats_created(BINDER_STAT_THREAD);
4673 thread->proc = proc;
4674 thread->pid = current->pid;
Martijn Coenence388e02017-06-06 17:04:42 -07004675 get_task_struct(current);
4676 thread->task = current;
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004677 atomic_set(&thread->tmp_ref, 0);
4678 init_waitqueue_head(&thread->wait);
4679 INIT_LIST_HEAD(&thread->todo);
4680 rb_link_node(&thread->rb_node, parent, p);
4681 rb_insert_color(&thread->rb_node, &proc->threads);
4682 thread->looper_need_return = true;
4683 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4684 thread->return_error.cmd = BR_OK;
4685 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4686 thread->reply_error.cmd = BR_OK;
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004687 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004688 return thread;
4689}
4690
4691static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4692{
4693 struct binder_thread *thread;
4694 struct binder_thread *new_thread;
4695
4696 binder_inner_proc_lock(proc);
4697 thread = binder_get_thread_ilocked(proc, NULL);
4698 binder_inner_proc_unlock(proc);
4699 if (!thread) {
4700 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4701 if (new_thread == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004702 return NULL;
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004703 binder_inner_proc_lock(proc);
4704 thread = binder_get_thread_ilocked(proc, new_thread);
4705 binder_inner_proc_unlock(proc);
4706 if (thread != new_thread)
4707 kfree(new_thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004708 }
4709 return thread;
4710}
4711
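/*
 * Final teardown of a binder_proc: both todo and delivered_death must
 * already be empty; releases the allocator, drops the task reference
 * and frees the structure.
 */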
Todd Kjos7a4408c2017-06-29 12:01:57 -07004712static void binder_free_proc(struct binder_proc *proc)
4713{
4714 BUG_ON(!list_empty(&proc->todo));
4715 BUG_ON(!list_empty(&proc->delivered_death));
4716 binder_alloc_deferred_release(&proc->alloc);
4717 put_task_struct(proc->tsk);
4718 binder_stats_deleted(BINDER_STAT_PROC);
4719 kfree(proc);
4720}
4721
4722static void binder_free_thread(struct binder_thread *thread)
4723{
4724 BUG_ON(!list_empty(&thread->todo));
4725 binder_stats_deleted(BINDER_STAT_THREAD);
4726 binder_proc_dec_tmpref(thread->proc);
Martijn Coenence388e02017-06-06 17:04:42 -07004727 put_task_struct(thread->task);
Todd Kjos7a4408c2017-06-29 12:01:57 -07004728 kfree(thread);
4729}
4730
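/*
 * Detach @thread from its proc and unwind the transactions it is part
 * of: a reply still owed to the sender is failed with BR_DEAD_REPLY,
 * pollers are woken with EPOLLHUP, and the thread's todo list is
 * flushed.  Returns the number of transactions still active.
 */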
4731static int binder_thread_release(struct binder_proc *proc,
4732 struct binder_thread *thread)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004733{
4734 struct binder_transaction *t;
4735 struct binder_transaction *send_reply = NULL;
4736 int active_transactions = 0;
Todd Kjos7a4408c2017-06-29 12:01:57 -07004737 struct binder_transaction *last_t = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004738
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004739 binder_inner_proc_lock(thread->proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07004740 /*
4741 * take a ref on the proc so it survives
4742 * after we remove this thread from proc->threads.
4743 * The corresponding dec is when we actually
4744 * free the thread in binder_free_thread()
4745 */
4746 proc->tmp_ref++;
4747 /*
4748 * take a ref on this thread to ensure it
4749 * survives while we are releasing it
4750 */
4751 atomic_inc(&thread->tmp_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004752 rb_erase(&thread->rb_node, &proc->threads);
4753 t = thread->transaction_stack;
Todd Kjos7a4408c2017-06-29 12:01:57 -07004754 if (t) {
4755 spin_lock(&t->lock);
4756 if (t->to_thread == thread)
4757 send_reply = t;
4758 }
4759 thread->is_dead = true;
4760
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004761 while (t) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07004762 last_t = t;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004763 active_transactions++;
4764 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05304765 "release %d:%d transaction %d %s, still active\n",
4766 proc->pid, thread->pid,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004767 t->debug_id,
4768 (t->to_thread == thread) ? "in" : "out");
4769
4770 if (t->to_thread == thread) {
4771 t->to_proc = NULL;
4772 t->to_thread = NULL;
4773 if (t->buffer) {
4774 t->buffer->transaction = NULL;
4775 t->buffer = NULL;
4776 }
4777 t = t->to_parent;
4778 } else if (t->from == thread) {
4779 t->from = NULL;
4780 t = t->from_parent;
4781 } else
4782 BUG();
Todd Kjos7a4408c2017-06-29 12:01:57 -07004783 spin_unlock(&last_t->lock);
4784 if (t)
4785 spin_lock(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004786 }
Martijn Coenenf5cb7792018-01-05 11:27:07 +01004787
4788 /*
4789 * If this thread used poll, make sure we remove the waitqueue
4790 * from any epoll data structures holding it with POLLFREE.
4791 * waitqueue_active() is safe to use here because we're holding
4792 * the inner lock.
4793 */
4794 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4795 waitqueue_active(&thread->wait)) {
Linus Torvaldsa9a08842018-02-11 14:34:03 -08004796 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
Martijn Coenenf5cb7792018-01-05 11:27:07 +01004797 }
4798
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07004799 binder_inner_proc_unlock(thread->proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07004800
Martijn Coenen5eeb2ca2018-02-16 09:47:15 +01004801 /*
4802 * This is needed to avoid races between wake_up_poll() above and
4803 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4804 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4805 * lock, so we can be sure it's done after calling synchronize_rcu().
4806 */
4807 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4808 synchronize_rcu();
4809
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004810 if (send_reply)
4811 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
Todd Kjos72196392017-06-29 12:02:02 -07004812 binder_release_work(proc, &thread->todo);
Todd Kjos7a4408c2017-06-29 12:01:57 -07004813 binder_thread_dec_tmpref(thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004814 return active_transactions;
4815}
4816
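/*
 * poll() support: marks the thread as a poller and reports EPOLLIN when
 * thread work (or process work, if this thread may take it) is pending.
 */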
Al Viroafc9a422017-07-03 06:39:46 -04004817static __poll_t binder_poll(struct file *filp,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004818 struct poll_table_struct *wait)
4819{
4820 struct binder_proc *proc = filp->private_data;
4821 struct binder_thread *thread = NULL;
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004822 bool wait_for_proc_work;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004823
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004824 thread = binder_get_thread(proc);
Eric Biggersf8898262018-01-30 23:11:24 -08004825 if (!thread)
4826 return POLLERR;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004827
Martijn Coenen0b89d692017-06-29 12:02:06 -07004828 binder_inner_proc_lock(thread->proc);
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004829 thread->looper |= BINDER_LOOPER_STATE_POLL;
4830 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4831
Martijn Coenen0b89d692017-06-29 12:02:06 -07004832 binder_inner_proc_unlock(thread->proc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004833
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004834 poll_wait(filp, &thread->wait, wait);
4835
Martijn Coenen66b83a42017-10-09 14:26:56 +02004836 if (binder_has_work(thread, wait_for_proc_work))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08004837 return EPOLLIN;
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004838
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004839 return 0;
4840}
4841
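/*
 * Dispatch a BINDER_WRITE_READ ioctl: drain the write buffer first,
 * then fill the read buffer (blocking unless O_NONBLOCK).  An
 * illustrative, non-normative userspace sketch of the calling
 * convention (parse_returns() is a placeholder):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size   = cmds_len,
 *		.read_buffer  = (binder_uintptr_t)rbuf,
 *		.read_size    = sizeof(rbuf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0)
 *		parse_returns(rbuf, bwr.read_consumed);
 */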
Tair Rzayev78260ac2014-06-03 22:27:21 +03004842static int binder_ioctl_write_read(struct file *filp,
4843 unsigned int cmd, unsigned long arg,
4844 struct binder_thread *thread)
4845{
4846 int ret = 0;
4847 struct binder_proc *proc = filp->private_data;
4848 unsigned int size = _IOC_SIZE(cmd);
4849 void __user *ubuf = (void __user *)arg;
4850 struct binder_write_read bwr;
4851
4852 if (size != sizeof(struct binder_write_read)) {
4853 ret = -EINVAL;
4854 goto out;
4855 }
4856 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4857 ret = -EFAULT;
4858 goto out;
4859 }
4860 binder_debug(BINDER_DEBUG_READ_WRITE,
4861 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4862 proc->pid, thread->pid,
4863 (u64)bwr.write_size, (u64)bwr.write_buffer,
4864 (u64)bwr.read_size, (u64)bwr.read_buffer);
4865
4866 if (bwr.write_size > 0) {
4867 ret = binder_thread_write(proc, thread,
4868 bwr.write_buffer,
4869 bwr.write_size,
4870 &bwr.write_consumed);
4871 trace_binder_write_done(ret);
4872 if (ret < 0) {
4873 bwr.read_consumed = 0;
4874 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4875 ret = -EFAULT;
4876 goto out;
4877 }
4878 }
4879 if (bwr.read_size > 0) {
4880 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4881 bwr.read_size,
4882 &bwr.read_consumed,
4883 filp->f_flags & O_NONBLOCK);
4884 trace_binder_read_done(ret);
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004885 binder_inner_proc_lock(proc);
4886 if (!binder_worklist_empty_ilocked(&proc->todo))
Martijn Coenen408c68b2017-08-31 10:04:19 +02004887 binder_wakeup_proc_ilocked(proc);
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02004888 binder_inner_proc_unlock(proc);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004889 if (ret < 0) {
4890 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4891 ret = -EFAULT;
4892 goto out;
4893 }
4894 }
4895 binder_debug(BINDER_DEBUG_READ_WRITE,
4896 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4897 proc->pid, thread->pid,
4898 (u64)bwr.write_consumed, (u64)bwr.write_size,
4899 (u64)bwr.read_consumed, (u64)bwr.read_size);
4900 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4901 ret = -EFAULT;
4902 goto out;
4903 }
4904out:
4905 return ret;
4906}
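/*
 * Illustrative userspace sketch, not part of the driver: one
 * BINDER_WRITE_READ round trip. The kernel copies bwr back to userspace
 * even on failure so write_consumed/read_consumed stay accurate, which is
 * exactly why the error paths above still call copy_to_user(). The helper
 * name is made up.
 *
 *   #include <stdint.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/android/binder.h>
 *
 *   static int do_write_read(int fd, void *wbuf, size_t wlen,
 *                            void *rbuf, size_t rlen, size_t *rconsumed)
 *   {
 *           struct binder_write_read bwr = {
 *                   .write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
 *                   .write_size   = wlen,
 *                   .read_buffer  = (binder_uintptr_t)(uintptr_t)rbuf,
 *                   .read_size    = rlen,
 *           };
 *
 *           if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *                   return -1;
 *           *rconsumed = bwr.read_consumed;      // bytes of BR_* commands
 *           return 0;
 *   }
 */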
4907
Todd Kjos00bac142019-01-14 09:10:21 -08004908static int binder_ioctl_set_ctx_mgr(struct file *filp,
4909 struct flat_binder_object *fbo)
Tair Rzayev78260ac2014-06-03 22:27:21 +03004910{
4911 int ret = 0;
4912 struct binder_proc *proc = filp->private_data;
Martijn Coenen342e5c92017-02-03 14:40:46 -08004913 struct binder_context *context = proc->context;
Todd Kjosc44b1232017-06-29 12:01:43 -07004914 struct binder_node *new_node;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004915 kuid_t curr_euid = current_euid();
4916
Todd Kjosc44b1232017-06-29 12:01:43 -07004917 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen342e5c92017-02-03 14:40:46 -08004918 if (context->binder_context_mgr_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004919 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4920 ret = -EBUSY;
4921 goto out;
4922 }
Stephen Smalley79af7302015-01-21 10:54:10 -05004923 ret = security_binder_set_context_mgr(proc->tsk);
4924 if (ret < 0)
4925 goto out;
Martijn Coenen342e5c92017-02-03 14:40:46 -08004926 if (uid_valid(context->binder_context_mgr_uid)) {
4927 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004928 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4929 from_kuid(&init_user_ns, curr_euid),
4930 from_kuid(&init_user_ns,
Martijn Coenen342e5c92017-02-03 14:40:46 -08004931 context->binder_context_mgr_uid));
Tair Rzayev78260ac2014-06-03 22:27:21 +03004932 ret = -EPERM;
4933 goto out;
4934 }
4935 } else {
Martijn Coenen342e5c92017-02-03 14:40:46 -08004936 context->binder_context_mgr_uid = curr_euid;
Tair Rzayev78260ac2014-06-03 22:27:21 +03004937 }
Todd Kjos00bac142019-01-14 09:10:21 -08004938 new_node = binder_new_node(proc, fbo);
Todd Kjosc44b1232017-06-29 12:01:43 -07004939 if (!new_node) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03004940 ret = -ENOMEM;
4941 goto out;
4942 }
Todd Kjos673068e2017-06-29 12:02:03 -07004943 binder_node_lock(new_node);
Todd Kjosc44b1232017-06-29 12:01:43 -07004944 new_node->local_weak_refs++;
4945 new_node->local_strong_refs++;
4946 new_node->has_strong_ref = 1;
4947 new_node->has_weak_ref = 1;
4948 context->binder_context_mgr_node = new_node;
Todd Kjos673068e2017-06-29 12:02:03 -07004949 binder_node_unlock(new_node);
Todd Kjosadc18842017-06-29 12:01:59 -07004950 binder_put_node(new_node);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004951out:
Todd Kjosc44b1232017-06-29 12:01:43 -07004952 mutex_unlock(&context->context_mgr_node_lock);
Tair Rzayev78260ac2014-06-03 22:27:21 +03004953 return ret;
4954}
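/*
 * Illustrative userspace sketch, not part of the driver: how a service
 * manager registers itself. The _EXT variant takes a flat_binder_object
 * so node flags can be requested on newer kernels; falling back to the
 * legacy ioctl keeps the sketch working on older ones. The helper name is
 * made up.
 *
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/android/binder.h>
 *
 *   static int become_context_manager(int fd)
 *   {
 *           struct flat_binder_object obj;
 *
 *           memset(&obj, 0, sizeof(obj));
 *           if (ioctl(fd, BINDER_SET_CONTEXT_MGR_EXT, &obj) < 0)
 *                   return ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
 *           return 0;
 *   }
 */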
4955
Martijn Coenen4bc6ad92018-08-25 13:50:56 -07004956static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4957 struct binder_node_info_for_ref *info)
4958{
4959 struct binder_node *node;
4960 struct binder_context *context = proc->context;
4961 __u32 handle = info->handle;
4962
4963 if (info->strong_count || info->weak_count || info->reserved1 ||
4964 info->reserved2 || info->reserved3) {
4965 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4966 proc->pid);
4967 return -EINVAL;
4968 }
4969
4970 /* This ioctl may only be used by the context manager */
4971 mutex_lock(&context->context_mgr_node_lock);
4972 if (!context->binder_context_mgr_node ||
4973 context->binder_context_mgr_node->proc != proc) {
4974 mutex_unlock(&context->context_mgr_node_lock);
4975 return -EPERM;
4976 }
4977 mutex_unlock(&context->context_mgr_node_lock);
4978
4979 node = binder_get_node_from_ref(proc, handle, true, NULL);
4980 if (!node)
4981 return -EINVAL;
4982
4983 info->strong_count = node->local_strong_refs +
4984 node->internal_strong_refs;
4985 info->weak_count = node->local_weak_refs;
4986
4987 binder_put_node(node);
4988
4989 return 0;
4990}
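/*
 * Illustrative userspace sketch, not part of the driver: only the context
 * manager may call this, and every field except 'handle' must be zero on
 * input or the ioctl fails with -EINVAL (checked above). The helper name
 * is made up.
 *
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/android/binder.h>
 *
 *   static int query_ref_counts(int fd, __u32 handle,
 *                               __u32 *strong, __u32 *weak)
 *   {
 *           struct binder_node_info_for_ref info;
 *
 *           memset(&info, 0, sizeof(info));
 *           info.handle = handle;
 *           if (ioctl(fd, BINDER_GET_NODE_INFO_FOR_REF, &info) < 0)
 *                   return -1;
 *           *strong = info.strong_count;
 *           *weak = info.weak_count;
 *           return 0;
 *   }
 */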
4991
Colin Crossabcc6152017-08-31 10:04:24 +02004992static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4993 struct binder_node_debug_info *info)
4994{
4995 struct rb_node *n;
4996 binder_uintptr_t ptr = info->ptr;
4997
4998 memset(info, 0, sizeof(*info));
4999
5000 binder_inner_proc_lock(proc);
5001 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5002 struct binder_node *node = rb_entry(n, struct binder_node,
5003 rb_node);
5004 if (node->ptr > ptr) {
5005 info->ptr = node->ptr;
5006 info->cookie = node->cookie;
5007 info->has_strong_ref = node->has_strong_ref;
5008 info->has_weak_ref = node->has_weak_ref;
5009 break;
5010 }
5011 }
5012 binder_inner_proc_unlock(proc);
5013
5014 return 0;
5015}
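/*
 * Illustrative userspace sketch, not part of the driver: the ioctl
 * returns the first node whose ptr is strictly greater than the one
 * passed in, so feeding the result back in enumerates the process's
 * nodes; an all-zero result (from the memset above) marks the end. The
 * helper name is made up.
 *
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/android/binder.h>
 *
 *   static void dump_my_nodes(int fd)
 *   {
 *           struct binder_node_debug_info info;
 *
 *           memset(&info, 0, sizeof(info));
 *           do {
 *                   if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *                           break;
 *                   if (info.ptr)
 *                           printf("node ptr=%llx cookie=%llx\n",
 *                                  (unsigned long long)info.ptr,
 *                                  (unsigned long long)info.cookie);
 *           } while (info.ptr);
 *   }
 */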
5016
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005017static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5018{
5019 int ret;
5020 struct binder_proc *proc = filp->private_data;
5021 struct binder_thread *thread;
5022 unsigned int size = _IOC_SIZE(cmd);
5023 void __user *ubuf = (void __user *)arg;
5024
Tair Rzayev78260ac2014-06-03 22:27:21 +03005025 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5026 proc->pid, current->pid, cmd, arg);*/
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005027
Sherry Yang4175e2b2017-08-23 08:46:40 -07005028 binder_selftest_alloc(&proc->alloc);
5029
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005030 trace_binder_ioctl(cmd, arg);
5031
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005032 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5033 if (ret)
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005034 goto err_unlocked;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005035
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005036 thread = binder_get_thread(proc);
5037 if (thread == NULL) {
5038 ret = -ENOMEM;
5039 goto err;
5040 }
5041
5042 switch (cmd) {
Tair Rzayev78260ac2014-06-03 22:27:21 +03005043 case BINDER_WRITE_READ:
5044 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5045 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005046 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005047 break;
Todd Kjosb3e68612017-06-29 12:02:07 -07005048 case BINDER_SET_MAX_THREADS: {
5049 int max_threads;
5050
5051 if (copy_from_user(&max_threads, ubuf,
5052 sizeof(max_threads))) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005053 ret = -EINVAL;
5054 goto err;
5055 }
Todd Kjosb3e68612017-06-29 12:02:07 -07005056 binder_inner_proc_lock(proc);
5057 proc->max_threads = max_threads;
5058 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005059 break;
Todd Kjosb3e68612017-06-29 12:02:07 -07005060 }
Todd Kjos00bac142019-01-14 09:10:21 -08005061 case BINDER_SET_CONTEXT_MGR_EXT: {
5062 struct flat_binder_object fbo;
5063
5064 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5065 ret = -EINVAL;
5066 goto err;
5067 }
5068 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5069 if (ret)
5070 goto err;
5071 break;
5072 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005073 case BINDER_SET_CONTEXT_MGR:
Todd Kjos00bac142019-01-14 09:10:21 -08005074 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
Tair Rzayev78260ac2014-06-03 22:27:21 +03005075 if (ret)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005076 goto err;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005077 break;
5078 case BINDER_THREAD_EXIT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05305079 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005080 proc->pid, thread->pid);
Todd Kjos7a4408c2017-06-29 12:01:57 -07005081 binder_thread_release(proc, thread);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005082 thread = NULL;
5083 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02005084 case BINDER_VERSION: {
5085 struct binder_version __user *ver = ubuf;
5086
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005087 if (size != sizeof(struct binder_version)) {
5088 ret = -EINVAL;
5089 goto err;
5090 }
Mathieu Maret36c89c02014-04-15 12:03:05 +02005091 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5092 &ver->protocol_version)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005093 ret = -EINVAL;
5094 goto err;
5095 }
5096 break;
Mathieu Maret36c89c02014-04-15 12:03:05 +02005097 }
Martijn Coenen4bc6ad92018-08-25 13:50:56 -07005098 case BINDER_GET_NODE_INFO_FOR_REF: {
5099 struct binder_node_info_for_ref info;
5100
5101 if (copy_from_user(&info, ubuf, sizeof(info))) {
5102 ret = -EFAULT;
5103 goto err;
5104 }
5105
5106 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5107 if (ret < 0)
5108 goto err;
5109
5110 if (copy_to_user(ubuf, &info, sizeof(info))) {
5111 ret = -EFAULT;
5112 goto err;
5113 }
5114
5115 break;
5116 }
Colin Crossabcc6152017-08-31 10:04:24 +02005117 case BINDER_GET_NODE_DEBUG_INFO: {
5118 struct binder_node_debug_info info;
5119
5120 if (copy_from_user(&info, ubuf, sizeof(info))) {
5121 ret = -EFAULT;
5122 goto err;
5123 }
5124
5125 ret = binder_ioctl_get_node_debug_info(proc, &info);
5126 if (ret < 0)
5127 goto err;
5128
5129 if (copy_to_user(ubuf, &info, sizeof(info))) {
5130 ret = -EFAULT;
5131 goto err;
5132 }
5133 break;
5134 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005135 default:
5136 ret = -EINVAL;
5137 goto err;
5138 }
5139 ret = 0;
5140err:
5141 if (thread)
Todd Kjos08dabce2017-06-29 12:01:49 -07005142 thread->looper_need_return = false;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005143 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5144 if (ret && ret != -ERESTARTSYS)
Anmol Sarma56b468f2012-10-30 22:35:43 +05305145 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005146err_unlocked:
5147 trace_binder_ioctl_done(ret);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005148 return ret;
5149}
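/*
 * Illustrative userspace sketch, not part of the driver: the minimal
 * post-open() handshake a client performs - verify the protocol version,
 * then size the pool of threads the kernel may ask it to spawn via
 * BR_SPAWN_LOOPER. The helper name is made up.
 *
 *   #include <sys/ioctl.h>
 *   #include <linux/android/binder.h>
 *
 *   static int binder_handshake(int fd, __u32 max_threads)
 *   {
 *           struct binder_version vers;
 *
 *           if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *               vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *                   return -1;
 *           return ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 *   }
 */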
5150
5151static void binder_vma_open(struct vm_area_struct *vma)
5152{
5153 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09005154
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005155 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05305156 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005157 proc->pid, vma->vm_start, vma->vm_end,
5158 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5159 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005160}
5161
5162static void binder_vma_close(struct vm_area_struct *vma)
5163{
5164 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09005165
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005166 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05305167 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005168 proc->pid, vma->vm_start, vma->vm_end,
5169 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5170 (unsigned long)pgprot_val(vma->vm_page_prot));
Todd Kjos19c98722017-06-29 12:01:40 -07005171 binder_alloc_vma_close(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005172 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
5173}
5174
Souptick Joardere19f70a2018-04-23 21:54:00 +05305175static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
Vinayak Menonddac7d52014-06-02 18:17:59 +05305176{
5177 return VM_FAULT_SIGBUS;
5178}
5179
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07005180static const struct vm_operations_struct binder_vm_ops = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005181 .open = binder_vma_open,
5182 .close = binder_vma_close,
Vinayak Menonddac7d52014-06-02 18:17:59 +05305183 .fault = binder_vm_fault,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005184};
5185
Todd Kjos19c98722017-06-29 12:01:40 -07005186static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5187{
5188 int ret;
5189 struct binder_proc *proc = filp->private_data;
5190 const char *failure_string;
5191
5192 if (proc->tsk != current->group_leader)
5193 return -EINVAL;
5194
5195 if ((vma->vm_end - vma->vm_start) > SZ_4M)
5196 vma->vm_end = vma->vm_start + SZ_4M;
5197
5198 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5199 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5200 __func__, proc->pid, vma->vm_start, vma->vm_end,
5201 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5202 (unsigned long)pgprot_val(vma->vm_page_prot));
5203
5204 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5205 ret = -EPERM;
5206 failure_string = "bad vm_flags";
5207 goto err_bad_arg;
5208 }
Minchan Kim720c2412018-05-07 23:15:37 +09005209 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5210 vma->vm_flags &= ~VM_MAYWRITE;
5211
Todd Kjos19c98722017-06-29 12:01:40 -07005212 vma->vm_ops = &binder_vm_ops;
5213 vma->vm_private_data = proc;
5214
5215 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5216 if (ret)
5217 return ret;
Todd Kjos7f3dc002017-11-27 09:32:33 -08005218 mutex_lock(&proc->files_lock);
Todd Kjos19c98722017-06-29 12:01:40 -07005219 proc->files = get_files_struct(current);
Todd Kjos7f3dc002017-11-27 09:32:33 -08005220 mutex_unlock(&proc->files_lock);
Todd Kjos19c98722017-06-29 12:01:40 -07005221 return 0;
5222
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005223err_bad_arg:
Elad Wexler00c41cd2017-12-29 11:03:37 +02005224 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005225 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5226 return ret;
5227}
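/*
 * Illustrative userspace sketch, not part of the driver: the mapping must
 * not be writable (FORBIDDEN_MMAP_FLAGS above) and anything past 4MB is
 * silently clamped; the kernel carves transaction buffers out of this
 * region and userspace only ever reads them. The size macro is a made-up
 * example value, not the driver's.
 *
 *   #include <sys/mman.h>
 *
 *   #define BINDER_VM_SIZE (1024 * 1024)         // hypothetical pool size
 *
 *   static void *map_binder_buffers(int fd)
 *   {
 *           void *base = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
 *                             MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 *           return base == MAP_FAILED ? NULL : base;
 *   }
 */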
5228
5229static int binder_open(struct inode *nodp, struct file *filp)
5230{
5231 struct binder_proc *proc;
Martijn Coenenac4812c2017-02-03 14:40:48 -08005232 struct binder_device *binder_dev;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005233
Elad Wexler00c41cd2017-12-29 11:03:37 +02005234 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005235 current->group_leader->pid, current->pid);
5236
5237 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5238 if (proc == NULL)
5239 return -ENOMEM;
Todd Kjos9630fe82017-06-29 12:02:00 -07005240 spin_lock_init(&proc->inner_lock);
5241 spin_lock_init(&proc->outer_lock);
Todd Kjosc4ea41b2017-06-29 12:01:36 -07005242 get_task_struct(current->group_leader);
5243 proc->tsk = current->group_leader;
Todd Kjos7f3dc002017-11-27 09:32:33 -08005244 mutex_init(&proc->files_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005245 INIT_LIST_HEAD(&proc->todo);
Martijn Coenence388e02017-06-06 17:04:42 -07005246 if (binder_supported_policy(current->policy)) {
5247 proc->default_priority.sched_policy = current->policy;
5248 proc->default_priority.prio = current->normal_prio;
5249 } else {
5250 proc->default_priority.sched_policy = SCHED_NORMAL;
5251 proc->default_priority.prio = NICE_TO_PRIO(0);
5252 }
5253
Martijn Coenenac4812c2017-02-03 14:40:48 -08005254 binder_dev = container_of(filp->private_data, struct binder_device,
5255 miscdev);
5256 proc->context = &binder_dev->context;
Todd Kjos19c98722017-06-29 12:01:40 -07005257 binder_alloc_init(&proc->alloc);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005258
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005259 binder_stats_created(BINDER_STAT_PROC);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005260 proc->pid = current->group_leader->pid;
5261 INIT_LIST_HEAD(&proc->delivered_death);
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02005262 INIT_LIST_HEAD(&proc->waiting_threads);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005263 filp->private_data = proc;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07005264
Todd Kjosc44b1232017-06-29 12:01:43 -07005265 mutex_lock(&binder_procs_lock);
5266 hlist_add_head(&proc->proc_node, &binder_procs);
5267 mutex_unlock(&binder_procs_lock);
5268
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005269 if (binder_debugfs_dir_entry_proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005270 char strbuf[11];
Seunghun Lee10f62862014-05-01 01:30:23 +09005271
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005272 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08005273 /*
5274 * proc debug entries are shared between contexts, so
5275 * this will fail if the process tries to open the driver
5276 * again with a different context. The printing code will
5277 * print all contexts that a given PID has anyway, so this
5278 * is not a problem.
5279 */
Harsh Shandilya21d02dd2017-12-22 19:37:02 +05305280 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
Martijn Coenen14db3182017-02-03 14:40:47 -08005281 binder_debugfs_dir_entry_proc,
5282 (void *)(unsigned long)proc->pid,
5283 &binder_proc_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005284 }
5285
5286 return 0;
5287}
5288
5289static int binder_flush(struct file *filp, fl_owner_t id)
5290{
5291 struct binder_proc *proc = filp->private_data;
5292
5293 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5294
5295 return 0;
5296}
5297
5298static void binder_deferred_flush(struct binder_proc *proc)
5299{
5300 struct rb_node *n;
5301 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09005302
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005303 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005304 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5305 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09005306
Todd Kjos08dabce2017-06-29 12:01:49 -07005307 thread->looper_need_return = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005308 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5309 wake_up_interruptible(&thread->wait);
5310 wake_count++;
5311 }
5312 }
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005313 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005314
5315 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5316 "binder_flush: %d woke %d threads\n", proc->pid,
5317 wake_count);
5318}
5319
5320static int binder_release(struct inode *nodp, struct file *filp)
5321{
5322 struct binder_proc *proc = filp->private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09005323
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07005324 debugfs_remove(proc->debugfs_entry);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005325 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5326
5327 return 0;
5328}
5329
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005330static int binder_node_release(struct binder_node *node, int refs)
5331{
5332 struct binder_ref *ref;
5333 int death = 0;
Todd Kjosed297212017-06-29 12:02:01 -07005334 struct binder_proc *proc = node->proc;
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005335
Todd Kjos72196392017-06-29 12:02:02 -07005336 binder_release_work(proc, &node->async_todo);
Todd Kjosed297212017-06-29 12:02:01 -07005337
Todd Kjos673068e2017-06-29 12:02:03 -07005338 binder_node_lock(node);
Todd Kjosed297212017-06-29 12:02:01 -07005339 binder_inner_proc_lock(proc);
Todd Kjos72196392017-06-29 12:02:02 -07005340 binder_dequeue_work_ilocked(&node->work);
Todd Kjosadc18842017-06-29 12:01:59 -07005341 /*
5342 * The caller must have taken a temporary ref on the node.
5343 */
5344 BUG_ON(!node->tmp_refs);
5345 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
Todd Kjosed297212017-06-29 12:02:01 -07005346 binder_inner_proc_unlock(proc);
Todd Kjos673068e2017-06-29 12:02:03 -07005347 binder_node_unlock(node);
Todd Kjosed297212017-06-29 12:02:01 -07005348 binder_free_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005349
5350 return refs;
5351 }
5352
5353 node->proc = NULL;
5354 node->local_strong_refs = 0;
5355 node->local_weak_refs = 0;
Todd Kjosed297212017-06-29 12:02:01 -07005356 binder_inner_proc_unlock(proc);
Todd Kjosc44b1232017-06-29 12:01:43 -07005357
5358 spin_lock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005359 hlist_add_head(&node->dead_node, &binder_dead_nodes);
Todd Kjosc44b1232017-06-29 12:01:43 -07005360 spin_unlock(&binder_dead_nodes_lock);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005361
5362 hlist_for_each_entry(ref, &node->refs, node_entry) {
5363 refs++;
Martijn Coenenab51ec62017-06-29 12:02:10 -07005364 /*
5365 * Need the node lock to synchronize
5366 * with new notification requests and the
5367 * inner lock to synchronize with queued
5368 * death notifications.
5369 */
5370 binder_inner_proc_lock(ref->proc);
5371 if (!ref->death) {
5372 binder_inner_proc_unlock(ref->proc);
Arve Hjønnevåge194fd82014-02-17 13:58:29 -08005373 continue;
Martijn Coenenab51ec62017-06-29 12:02:10 -07005374 }
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005375
5376 death++;
5377
Martijn Coenenab51ec62017-06-29 12:02:10 -07005378 BUG_ON(!list_empty(&ref->death->work.entry));
5379 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5380 binder_enqueue_work_ilocked(&ref->death->work,
5381 &ref->proc->todo);
Martijn Coenen408c68b2017-08-31 10:04:19 +02005382 binder_wakeup_proc_ilocked(ref->proc);
Todd Kjos72196392017-06-29 12:02:02 -07005383 binder_inner_proc_unlock(ref->proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005384 }
5385
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005386 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5387 "node %d now dead, refs %d, death %d\n",
5388 node->debug_id, refs, death);
Todd Kjos673068e2017-06-29 12:02:03 -07005389 binder_node_unlock(node);
Todd Kjosadc18842017-06-29 12:01:59 -07005390 binder_put_node(node);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005391
5392 return refs;
5393}
5394
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005395static void binder_deferred_release(struct binder_proc *proc)
5396{
Martijn Coenen342e5c92017-02-03 14:40:46 -08005397 struct binder_context *context = proc->context;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005398 struct rb_node *n;
Todd Kjos19c98722017-06-29 12:01:40 -07005399 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005400
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005401 BUG_ON(proc->files);
5402
Todd Kjosc44b1232017-06-29 12:01:43 -07005403 mutex_lock(&binder_procs_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005404 hlist_del(&proc->proc_node);
Todd Kjosc44b1232017-06-29 12:01:43 -07005405 mutex_unlock(&binder_procs_lock);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005406
Todd Kjosc44b1232017-06-29 12:01:43 -07005407 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen342e5c92017-02-03 14:40:46 -08005408 if (context->binder_context_mgr_node &&
5409 context->binder_context_mgr_node->proc == proc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005410 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01005411 "%s: %d context_mgr_node gone\n",
5412 __func__, proc->pid);
Martijn Coenen342e5c92017-02-03 14:40:46 -08005413 context->binder_context_mgr_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005414 }
Todd Kjosc44b1232017-06-29 12:01:43 -07005415 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005416 binder_inner_proc_lock(proc);
Todd Kjos7a4408c2017-06-29 12:01:57 -07005417 /*
5418 * Make sure proc stays alive after we
5419 * remove all the threads
5420 */
5421 proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005422
Todd Kjos7a4408c2017-06-29 12:01:57 -07005423 proc->is_dead = true;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005424 threads = 0;
5425 active_transactions = 0;
5426 while ((n = rb_first(&proc->threads))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005427 struct binder_thread *thread;
5428
5429 thread = rb_entry(n, struct binder_thread, rb_node);
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005430 binder_inner_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005431 threads++;
Todd Kjos7a4408c2017-06-29 12:01:57 -07005432 active_transactions += binder_thread_release(proc, thread);
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005433 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005434 }
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005435
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005436 nodes = 0;
5437 incoming_refs = 0;
5438 while ((n = rb_first(&proc->nodes))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005439 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005440
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005441 node = rb_entry(n, struct binder_node, rb_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005442 nodes++;
Todd Kjosadc18842017-06-29 12:01:59 -07005443 /*
5444 * take a temporary ref on the node before
5445 * calling binder_node_release() which will either
5446 * kfree() the node or call binder_put_node()
5447 */
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005448 binder_inc_node_tmpref_ilocked(node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005449 rb_erase(&node->rb_node, &proc->nodes);
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005450 binder_inner_proc_unlock(proc);
Mirsal Ennaime008fa742013-03-12 11:41:59 +01005451 incoming_refs = binder_node_release(node, incoming_refs);
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005452 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005453 }
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005454 binder_inner_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005455
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005456 outgoing_refs = 0;
Todd Kjos2c1838d2017-06-29 12:02:08 -07005457 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005458 while ((n = rb_first(&proc->refs_by_desc))) {
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005459 struct binder_ref *ref;
5460
5461 ref = rb_entry(n, struct binder_ref, rb_node_desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005462 outgoing_refs++;
Todd Kjos2c1838d2017-06-29 12:02:08 -07005463 binder_cleanup_ref_olocked(ref);
5464 binder_proc_unlock(proc);
Todd Kjos372e3142017-06-29 12:01:58 -07005465 binder_free_ref(ref);
Todd Kjos2c1838d2017-06-29 12:02:08 -07005466 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005467 }
Todd Kjos2c1838d2017-06-29 12:02:08 -07005468 binder_proc_unlock(proc);
Mirsal Ennaime53413e72013-03-12 11:42:00 +01005469
Todd Kjos72196392017-06-29 12:02:02 -07005470 binder_release_work(proc, &proc->todo);
5471 binder_release_work(proc, &proc->delivered_death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005472
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005473 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Todd Kjos19c98722017-06-29 12:01:40 -07005474 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
Mirsal Ennaimec07c9332013-03-12 11:42:02 +01005475 __func__, proc->pid, threads, nodes, incoming_refs,
Todd Kjos19c98722017-06-29 12:01:40 -07005476 outgoing_refs, active_transactions);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005477
Todd Kjos7a4408c2017-06-29 12:01:57 -07005478 binder_proc_dec_tmpref(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005479}
5480
5481static void binder_deferred_func(struct work_struct *work)
5482{
5483 struct binder_proc *proc;
5484 struct files_struct *files;
5486 int defer;
Seunghun Lee10f62862014-05-01 01:30:23 +09005487
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005488 do {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005489 mutex_lock(&binder_deferred_lock);
5490 if (!hlist_empty(&binder_deferred_list)) {
5491 proc = hlist_entry(binder_deferred_list.first,
5492 struct binder_proc, deferred_work_node);
5493 hlist_del_init(&proc->deferred_work_node);
5494 defer = proc->deferred_work;
5495 proc->deferred_work = 0;
5496 } else {
5497 proc = NULL;
5498 defer = 0;
5499 }
5500 mutex_unlock(&binder_deferred_lock);
5501
5502 files = NULL;
5503 if (defer & BINDER_DEFERRED_PUT_FILES) {
Todd Kjos7f3dc002017-11-27 09:32:33 -08005504 mutex_lock(&proc->files_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005505 files = proc->files;
5506 if (files)
5507 proc->files = NULL;
Todd Kjos7f3dc002017-11-27 09:32:33 -08005508 mutex_unlock(&proc->files_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005509 }
5510
5511 if (defer & BINDER_DEFERRED_FLUSH)
5512 binder_deferred_flush(proc);
5513
5514 if (defer & BINDER_DEFERRED_RELEASE)
5515 binder_deferred_release(proc); /* frees proc */
5516
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005517 if (files)
5518 put_files_struct(files);
5519 } while (proc);
5520}
5521static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5522
5523static void
5524binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5525{
5526 mutex_lock(&binder_deferred_lock);
5527 proc->deferred_work |= defer;
5528 if (hlist_unhashed(&proc->deferred_work_node)) {
5529 hlist_add_head(&proc->deferred_work_node,
5530 &binder_deferred_list);
Bhaktipriya Shridhar1beba522016-08-13 22:16:24 +05305531 schedule_work(&binder_deferred_work);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005532 }
5533 mutex_unlock(&binder_deferred_lock);
5534}
5535
Todd Kjos5f2f6362017-06-29 12:02:09 -07005536static void print_binder_transaction_ilocked(struct seq_file *m,
5537 struct binder_proc *proc,
5538 const char *prefix,
5539 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005540{
Todd Kjos5f2f6362017-06-29 12:02:09 -07005541 struct binder_proc *to_proc;
5542 struct binder_buffer *buffer = t->buffer;
5543
Todd Kjos7a4408c2017-06-29 12:01:57 -07005544 spin_lock(&t->lock);
Todd Kjos5f2f6362017-06-29 12:02:09 -07005545 to_proc = t->to_proc;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005546 seq_printf(m,
Martijn Coenence388e02017-06-06 17:04:42 -07005547 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005548 prefix, t->debug_id, t,
5549 t->from ? t->from->proc->pid : 0,
5550 t->from ? t->from->pid : 0,
Todd Kjos5f2f6362017-06-29 12:02:09 -07005551 to_proc ? to_proc->pid : 0,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005552 t->to_thread ? t->to_thread->pid : 0,
Martijn Coenence388e02017-06-06 17:04:42 -07005553 t->code, t->flags, t->priority.sched_policy,
5554 t->priority.prio, t->need_reply);
Todd Kjos7a4408c2017-06-29 12:01:57 -07005555 spin_unlock(&t->lock);
5556
Todd Kjos5f2f6362017-06-29 12:02:09 -07005557 if (proc != to_proc) {
5558 /*
5559 * Can only safely deref buffer if we are holding the
5560 * correct proc inner lock for this node
5561 */
5562 seq_puts(m, "\n");
5563 return;
5564 }
5565
5566 if (buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005567 seq_puts(m, " buffer free\n");
5568 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005569 }
Todd Kjos5f2f6362017-06-29 12:02:09 -07005570 if (buffer->target_node)
5571 seq_printf(m, " node %d", buffer->target_node->debug_id);
Todd Kjos8ca86f12018-02-07 13:57:37 -08005572 seq_printf(m, " size %zd:%zd data %pK\n",
Todd Kjos5f2f6362017-06-29 12:02:09 -07005573 buffer->data_size, buffer->offsets_size,
Todd Kjos3d4f1ad2019-02-08 10:35:20 -08005574 buffer->user_data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005575}
5576
Todd Kjos5f2f6362017-06-29 12:02:09 -07005577static void print_binder_work_ilocked(struct seq_file *m,
5578 struct binder_proc *proc,
5579 const char *prefix,
5580 const char *transaction_prefix,
5581 struct binder_work *w)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005582{
5583 struct binder_node *node;
5584 struct binder_transaction *t;
5585
5586 switch (w->type) {
5587 case BINDER_WORK_TRANSACTION:
5588 t = container_of(w, struct binder_transaction, work);
Todd Kjos5f2f6362017-06-29 12:02:09 -07005589 print_binder_transaction_ilocked(
5590 m, proc, transaction_prefix, t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005591 break;
Todd Kjos26549d12017-06-29 12:01:55 -07005592 case BINDER_WORK_RETURN_ERROR: {
5593 struct binder_error *e = container_of(
5594 w, struct binder_error, work);
5595
5596 seq_printf(m, "%stransaction error: %u\n",
5597 prefix, e->cmd);
5598 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005599 case BINDER_WORK_TRANSACTION_COMPLETE:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005600 seq_printf(m, "%stransaction complete\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005601 break;
5602 case BINDER_WORK_NODE:
5603 node = container_of(w, struct binder_node, work);
Arve Hjønnevågda498892014-02-21 14:40:26 -08005604 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5605 prefix, node->debug_id,
5606 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005607 break;
5608 case BINDER_WORK_DEAD_BINDER:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005609 seq_printf(m, "%shas dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005610 break;
5611 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005612 seq_printf(m, "%shas cleared dead binder\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005613 break;
5614 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005615 seq_printf(m, "%shas cleared death notification\n", prefix);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005616 break;
5617 default:
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005618 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005619 break;
5620 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005621}
5622
Todd Kjos72196392017-06-29 12:02:02 -07005623static void print_binder_thread_ilocked(struct seq_file *m,
5624 struct binder_thread *thread,
5625 int print_always)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005626{
5627 struct binder_transaction *t;
5628 struct binder_work *w;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005629 size_t start_pos = m->count;
5630 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005631
Todd Kjos7a4408c2017-06-29 12:01:57 -07005632 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
Todd Kjos08dabce2017-06-29 12:01:49 -07005633 thread->pid, thread->looper,
Todd Kjos7a4408c2017-06-29 12:01:57 -07005634 thread->looper_need_return,
5635 atomic_read(&thread->tmp_ref));
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005636 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005637 t = thread->transaction_stack;
5638 while (t) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005639 if (t->from == thread) {
Todd Kjos5f2f6362017-06-29 12:02:09 -07005640 print_binder_transaction_ilocked(m, thread->proc,
5641 " outgoing transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005642 t = t->from_parent;
5643 } else if (t->to_thread == thread) {
Todd Kjos5f2f6362017-06-29 12:02:09 -07005644 print_binder_transaction_ilocked(m, thread->proc,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005645 " incoming transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005646 t = t->to_parent;
5647 } else {
Todd Kjos5f2f6362017-06-29 12:02:09 -07005648 print_binder_transaction_ilocked(m, thread->proc,
5649 " bad transaction", t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005650 t = NULL;
5651 }
5652 }
5653 list_for_each_entry(w, &thread->todo, entry) {
Todd Kjos5f2f6362017-06-29 12:02:09 -07005654 print_binder_work_ilocked(m, thread->proc, " ",
Todd Kjos72196392017-06-29 12:02:02 -07005655 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005656 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005657 if (!print_always && m->count == header_pos)
5658 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005659}
5660
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005661static void print_binder_node_nilocked(struct seq_file *m,
5662 struct binder_node *node)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005663{
5664 struct binder_ref *ref;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005665 struct binder_work *w;
5666 int count;
5667
5668 count = 0;
Sasha Levinb67bfe02013-02-27 17:06:00 -08005669 hlist_for_each_entry(ref, &node->refs, node_entry)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005670 count++;
5671
Martijn Coenence388e02017-06-06 17:04:42 -07005672 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
Arve Hjønnevågda498892014-02-21 14:40:26 -08005673 node->debug_id, (u64)node->ptr, (u64)node->cookie,
Martijn Coenence388e02017-06-06 17:04:42 -07005674 node->sched_policy, node->min_priority,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005675 node->has_strong_ref, node->has_weak_ref,
5676 node->local_strong_refs, node->local_weak_refs,
Todd Kjosadc18842017-06-29 12:01:59 -07005677 node->internal_strong_refs, count, node->tmp_refs);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005678 if (count) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005679 seq_puts(m, " proc");
Sasha Levinb67bfe02013-02-27 17:06:00 -08005680 hlist_for_each_entry(ref, &node->refs, node_entry)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005681 seq_printf(m, " %d", ref->proc->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005682 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005683 seq_puts(m, "\n");
Todd Kjos72196392017-06-29 12:02:02 -07005684 if (node->proc) {
Todd Kjos72196392017-06-29 12:02:02 -07005685 list_for_each_entry(w, &node->async_todo, entry)
Todd Kjos5f2f6362017-06-29 12:02:09 -07005686 print_binder_work_ilocked(m, node->proc, " ",
Todd Kjos72196392017-06-29 12:02:02 -07005687 " pending async transaction", w);
Todd Kjos72196392017-06-29 12:02:02 -07005688 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005689}
5690
Todd Kjos2c1838d2017-06-29 12:02:08 -07005691static void print_binder_ref_olocked(struct seq_file *m,
5692 struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005693{
Todd Kjos673068e2017-06-29 12:02:03 -07005694 binder_node_lock(ref->node);
Todd Kjos372e3142017-06-29 12:01:58 -07005695 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5696 ref->data.debug_id, ref->data.desc,
5697 ref->node->proc ? "" : "dead ",
5698 ref->node->debug_id, ref->data.strong,
5699 ref->data.weak, ref->death);
Todd Kjos673068e2017-06-29 12:02:03 -07005700 binder_node_unlock(ref->node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005701}
5702
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005703static void print_binder_proc(struct seq_file *m,
5704 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005705{
5706 struct binder_work *w;
5707 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005708 size_t start_pos = m->count;
5709 size_t header_pos;
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005710 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005711
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005712 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08005713 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005714 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005715
Todd Kjos72196392017-06-29 12:02:02 -07005716 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005717 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
Todd Kjos72196392017-06-29 12:02:02 -07005718 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005719 rb_node), print_all);
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005720
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005721 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005722 struct binder_node *node = rb_entry(n, struct binder_node,
5723 rb_node);
Todd Kjos4a7c1ae2018-12-05 15:19:26 -08005724 if (!print_all && !node->has_async_transaction)
5725 continue;
5726
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005727 /*
5728 * take a temporary reference on the node so it
5729 * survives and isn't removed from the tree
5730 * while we print it.
5731 */
5732 binder_inc_node_tmpref_ilocked(node);
5733 /* Need to drop inner lock to take node lock */
5734 binder_inner_proc_unlock(proc);
5735 if (last_node)
5736 binder_put_node(last_node);
5737 binder_node_inner_lock(node);
5738 print_binder_node_nilocked(m, node);
5739 binder_node_inner_unlock(node);
5740 last_node = node;
5741 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005742 }
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005743 binder_inner_proc_unlock(proc);
5744 if (last_node)
5745 binder_put_node(last_node);
5746
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005747 if (print_all) {
Todd Kjos2c1838d2017-06-29 12:02:08 -07005748 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005749 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005750 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005751 n = rb_next(n))
Todd Kjos2c1838d2017-06-29 12:02:08 -07005752 print_binder_ref_olocked(m, rb_entry(n,
5753 struct binder_ref,
5754 rb_node_desc));
5755 binder_proc_unlock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005756 }
Todd Kjos19c98722017-06-29 12:01:40 -07005757 binder_alloc_print_allocated(m, &proc->alloc);
Todd Kjos72196392017-06-29 12:02:02 -07005758 binder_inner_proc_lock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005759 list_for_each_entry(w, &proc->todo, entry)
Todd Kjos5f2f6362017-06-29 12:02:09 -07005760 print_binder_work_ilocked(m, proc, " ",
5761 " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005762 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005763 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005764 break;
5765 }
Todd Kjos72196392017-06-29 12:02:02 -07005766 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005767 if (!print_all && m->count == header_pos)
5768 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005769}
5770
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005771static const char * const binder_return_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005772 "BR_ERROR",
5773 "BR_OK",
5774 "BR_TRANSACTION",
5775 "BR_REPLY",
5776 "BR_ACQUIRE_RESULT",
5777 "BR_DEAD_REPLY",
5778 "BR_TRANSACTION_COMPLETE",
5779 "BR_INCREFS",
5780 "BR_ACQUIRE",
5781 "BR_RELEASE",
5782 "BR_DECREFS",
5783 "BR_ATTEMPT_ACQUIRE",
5784 "BR_NOOP",
5785 "BR_SPAWN_LOOPER",
5786 "BR_FINISHED",
5787 "BR_DEAD_BINDER",
5788 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5789 "BR_FAILED_REPLY"
5790};
5791
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005792static const char * const binder_command_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005793 "BC_TRANSACTION",
5794 "BC_REPLY",
5795 "BC_ACQUIRE_RESULT",
5796 "BC_FREE_BUFFER",
5797 "BC_INCREFS",
5798 "BC_ACQUIRE",
5799 "BC_RELEASE",
5800 "BC_DECREFS",
5801 "BC_INCREFS_DONE",
5802 "BC_ACQUIRE_DONE",
5803 "BC_ATTEMPT_ACQUIRE",
5804 "BC_REGISTER_LOOPER",
5805 "BC_ENTER_LOOPER",
5806 "BC_EXIT_LOOPER",
5807 "BC_REQUEST_DEATH_NOTIFICATION",
5808 "BC_CLEAR_DEATH_NOTIFICATION",
Martijn Coenen79802402017-02-03 14:40:51 -08005809 "BC_DEAD_BINDER_DONE",
5810 "BC_TRANSACTION_SG",
5811 "BC_REPLY_SG",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005812};
5813
Cruz Julian Bishop167bccb2012-12-22 09:00:45 +10005814static const char * const binder_objstat_strings[] = {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005815 "proc",
5816 "thread",
5817 "node",
5818 "ref",
5819 "death",
5820 "transaction",
5821 "transaction_complete"
5822};
5823
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005824static void print_binder_stats(struct seq_file *m, const char *prefix,
5825 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005826{
5827 int i;
5828
5829 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005830 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005831 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07005832 int temp = atomic_read(&stats->bc[i]);
5833
5834 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005835 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07005836 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005837 }
5838
5839 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005840 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005841 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07005842 int temp = atomic_read(&stats->br[i]);
5843
5844 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005845 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07005846 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005847 }
5848
5849 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005850 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005851 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005852 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005853 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07005854 int created = atomic_read(&stats->obj_created[i]);
5855 int deleted = atomic_read(&stats->obj_deleted[i]);
5856
5857 if (created || deleted)
5858 seq_printf(m, "%s%s: active %d total %d\n",
5859 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005860 binder_objstat_strings[i],
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07005861 created - deleted,
5862 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005863 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005864}
5865
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005866static void print_binder_proc_stats(struct seq_file *m,
5867 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005868{
5869 struct binder_work *w;
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02005870 struct binder_thread *thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005871 struct rb_node *n;
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02005872 int count, strong, weak, ready_threads;
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005873 size_t free_async_space =
5874 binder_alloc_get_free_async_space(&proc->alloc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005875
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005876 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08005877 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005878 count = 0;
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02005879 ready_threads = 0;
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005880 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005881 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5882 count++;
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02005883
5884 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5885 ready_threads++;
5886
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005887 seq_printf(m, " threads: %d\n", count);
5888 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005889 " ready threads %d\n"
5890 " free async space %zd\n", proc->requested_threads,
5891 proc->requested_threads_started, proc->max_threads,
Martijn Coenen1b77e9d2017-08-31 10:04:18 +02005892 ready_threads,
Todd Kjos7bd7b0e2017-06-29 12:02:05 -07005893 free_async_space);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005894 count = 0;
5895 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5896 count++;
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005897 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005898 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005899 count = 0;
5900 strong = 0;
5901 weak = 0;
Todd Kjos2c1838d2017-06-29 12:02:08 -07005902 binder_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005903 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5904 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5905 rb_node_desc);
5906 count++;
Todd Kjos372e3142017-06-29 12:01:58 -07005907 strong += ref->data.strong;
5908 weak += ref->data.weak;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005909 }
Todd Kjos2c1838d2017-06-29 12:02:08 -07005910 binder_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005911 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005912
Todd Kjos19c98722017-06-29 12:01:40 -07005913 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005914 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005915
Sherry Yang8ef46652017-08-31 11:56:36 -07005916 binder_alloc_print_pages(m, &proc->alloc);
5917
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005918 count = 0;
Todd Kjos72196392017-06-29 12:02:02 -07005919 binder_inner_proc_lock(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005920 list_for_each_entry(w, &proc->todo, entry) {
Todd Kjos72196392017-06-29 12:02:02 -07005921 if (w->type == BINDER_WORK_TRANSACTION)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005922 count++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005923 }
Todd Kjos72196392017-06-29 12:02:02 -07005924 binder_inner_proc_unlock(proc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005925 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005926
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005927 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005928}
5929
5930
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005931static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005932{
5933 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005934 struct binder_node *node;
Todd Kjos673068e2017-06-29 12:02:03 -07005935 struct binder_node *last_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005936
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005937 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005938
Todd Kjosc44b1232017-06-29 12:01:43 -07005939 spin_lock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005940 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005941 seq_puts(m, "dead nodes:\n");
Todd Kjos673068e2017-06-29 12:02:03 -07005942 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5943 /*
5944 * take a temporary reference on the node so it
5945 * survives and isn't removed from the list
5946 * while we print it.
5947 */
5948 node->tmp_refs++;
5949 spin_unlock(&binder_dead_nodes_lock);
5950 if (last_node)
5951 binder_put_node(last_node);
5952 binder_node_lock(node);
Todd Kjosda0fa9e2017-06-29 12:02:04 -07005953 print_binder_node_nilocked(m, node);
Todd Kjos673068e2017-06-29 12:02:03 -07005954 binder_node_unlock(node);
5955 last_node = node;
5956 spin_lock(&binder_dead_nodes_lock);
5957 }
Todd Kjosc44b1232017-06-29 12:01:43 -07005958 spin_unlock(&binder_dead_nodes_lock);
Todd Kjos673068e2017-06-29 12:02:03 -07005959 if (last_node)
5960 binder_put_node(last_node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005961
Todd Kjosc44b1232017-06-29 12:01:43 -07005962 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005963 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005964 print_binder_proc(m, proc, 1);
Todd Kjosc44b1232017-06-29 12:01:43 -07005965 mutex_unlock(&binder_procs_lock);
Todd Kjosa60b8902017-06-29 12:02:11 -07005966
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005967 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005968}
5969
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005970static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005971{
5972 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005973
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005974 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005975
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005976 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005977
Todd Kjosc44b1232017-06-29 12:01:43 -07005978 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005979 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005980 print_binder_proc_stats(m, proc);
Todd Kjosc44b1232017-06-29 12:01:43 -07005981 mutex_unlock(&binder_procs_lock);
Todd Kjosa60b8902017-06-29 12:02:11 -07005982
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005983 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005984}
5985
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005986static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005987{
5988 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005989
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005990 seq_puts(m, "binder transactions:\n");
Todd Kjosc44b1232017-06-29 12:01:43 -07005991 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08005992 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005993 print_binder_proc(m, proc, 0);
Todd Kjosc44b1232017-06-29 12:01:43 -07005994 mutex_unlock(&binder_procs_lock);
Todd Kjosa60b8902017-06-29 12:02:11 -07005995
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005996 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09005997}
5998
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07005999static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09006000{
Riley Andrews83050a42016-02-09 21:05:33 -08006001 struct binder_proc *itr;
Martijn Coenen14db3182017-02-03 14:40:47 -08006002 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09006003
Todd Kjosc44b1232017-06-29 12:01:43 -07006004 mutex_lock(&binder_procs_lock);
Riley Andrews83050a42016-02-09 21:05:33 -08006005 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen14db3182017-02-03 14:40:47 -08006006 if (itr->pid == pid) {
6007 seq_puts(m, "binder proc state:\n");
6008 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08006009 }
6010 }
Todd Kjosc44b1232017-06-29 12:01:43 -07006011 mutex_unlock(&binder_procs_lock);
6012
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07006013 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09006014}
6015
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07006016static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09006017 struct binder_transaction_log_entry *e)
6018{
Todd Kjosd99c7332017-06-29 12:01:53 -07006019 int debug_id = READ_ONCE(e->debug_id_done);
6020 /*
6021 * read barrier to guarantee debug_id_done read before
6022 * we print the log values
6023 */
6024 smp_rmb();
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07006025 seq_printf(m,
Todd Kjosd99c7332017-06-29 12:01:53 -07006026 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07006027 e->debug_id, (e->call_type == 2) ? "reply" :
6028 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen14db3182017-02-03 14:40:47 -08006029 e->from_thread, e->to_proc, e->to_thread, e->context_name,
Todd Kjos57ada2f2017-06-29 12:01:46 -07006030 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6031 e->return_error, e->return_error_param,
6032 e->return_error_line);
Todd Kjosd99c7332017-06-29 12:01:53 -07006033 /*
6034 * read-barrier to guarantee read of debug_id_done after
6035 * done printing the fields of the entry
6036 */
6037 smp_rmb();
6038 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6039 "\n" : " (incomplete)\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09006040}
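
/*
 * The two smp_rmb() calls above form the reader half of a seqlock-like
 * protocol.  The writer half (a sketch, assuming the scheme used by
 * binder_transaction_log_add() and its callers earlier in this file)
 * clears debug_id_done before filling in the entry and publishes the
 * final debug_id only after a write barrier:
 *
 *	WRITE_ONCE(e->debug_id_done, 0);
 *	smp_wmb();
 *	memset(e, 0, sizeof(*e));
 *	... fill in the entry fields ...
 *	smp_wmb();
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 *
 * If the reader sees the same non-zero debug_id_done before and after
 * printing, no writer raced with it; otherwise the line is flagged
 * " (incomplete)".
 */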

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
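
/*
 * Worked example of the cursor arithmetic above, assuming the entry
 * array holds 32 slots (its size in this version) and that log->cur
 * starts at ~0U so the first record lands on slot 0:
 *
 *	after  5 records: log_cur = 4,  count = 5 < 32 and !full
 *		-> cur = 0, print slots 0..4 in order
 *	after 40 records: log_cur = 39, count = 40 > 32
 *		-> count = 32, cur = 40 % 32 = 8,
 *		   print slots 8..31 then 0..7 (oldest surviving first)
 *
 * Entries may still be overwritten while being printed; the
 * debug_id_done check above is what flags those lines.
 */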

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
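
/*
 * binder_ioctl() also serves .compat_ioctl because the uapi uses
 * fixed-size binder_uintptr_t fields and is therefore 64-bit clean.
 * Minimal userspace sketch of the resulting character-device API
 * (illustrative only; it relies on the uapi definitions from
 * <linux/android/binder.h>, not on anything defined in this file):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	if (fd < 0 || ioctl(fd, BINDER_VERSION, &vers) < 0)
 *		exit(1);
 *	// map the buffer space that binder_mmap() backs; transaction
 *	// payloads are delivered through this read-only mapping
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * All real traffic then goes through ioctl(fd, BINDER_WRITE_READ, ...)
 * with a struct binder_write_read describing the command buffers.
 */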

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
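
/*
 * Each successfully registered device appears as a misc character
 * device with a dynamically assigned minor, i.e. /dev/<name> once
 * userspace (udev or equivalent) has created the node.  The
 * binder_devices hlist built here is what the error path in
 * binder_init() below unwinds.
 */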

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}
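
	/*
	 * At this point the debugfs tree (when debugfs is enabled and
	 * mounted at the usual place) is:
	 *
	 *	/sys/kernel/debug/binder/state
	 *	/sys/kernel/debug/binder/stats
	 *	/sys/kernel/debug/binder/transactions
	 *	/sys/kernel/debug/binder/transaction_log
	 *	/sys/kernel/debug/binder/failed_transaction_log
	 *	/sys/kernel/debug/binder/proc/<pid>	(added per process)
	 *
	 * Both log files share binder_transaction_log_fops and are
	 * distinguished only by their private data pointer.
	 */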

	/*
	 * Copy the module parameter string, because we don't want to
	 * tokenize it in place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}
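
	/*
	 * Example: with the default CONFIG_ANDROID_BINDER_DEVICES value
	 * of "binder,hwbinder,vndbinder" (an assumption about the Kconfig
	 * default, not something visible in this file), the loop above
	 * registers three independent devices, /dev/binder, /dev/hwbinder
	 * and /dev/vndbinder, each with its own context and its own
	 * context manager.
	 */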

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");