blob: 20d5efdeee029bb805c2034921284580ece011d6 [file] [log] [blame]
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */
9
Randy.Dunlapc59ede72006-01-11 12:17:46 -080010#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15#include <linux/highmem.h>
16#include <linux/pagemap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/ptrace.h>
18#include <linux/security.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070019#include <linux/signal.h>
Al Viroa5cb0132007-03-20 13:58:35 -040020#include <linux/audit.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070021#include <linux/pid_namespace.h>
Adrian Bunkf17d30a2008-02-06 01:36:44 -080022#include <linux/syscalls.h>
Roland McGrath3a709702009-04-07 23:21:06 -070023#include <linux/uaccess.h>
Suresh Siddha2225a122010-02-11 11:51:00 -080024#include <linux/regset.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
Markus Metzgerbf53de92008-12-19 15:10:24 +010026
27/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070028 * ptrace a task: make the debugger its new parent and
29 * move it to the ptrace list.
30 *
31 * Must be called with the tasklist lock write-held.
32 */
Ingo Molnar36c8b582006-07-03 00:25:41 -070033void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
Linus Torvalds1da177e2005-04-16 15:20:36 -070034{
Roland McGrathf4700212008-03-24 18:36:23 -070035 BUG_ON(!list_empty(&child->ptrace_entry));
36 list_add(&child->ptrace_entry, &new_parent->ptraced);
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 child->parent = new_parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -070038}
Roland McGrath3a709702009-04-07 23:21:06 -070039
Tejun Heoe3bd0582011-03-23 10:37:01 +010040/**
41 * __ptrace_unlink - unlink ptracee and restore its execution state
42 * @child: ptracee to be unlinked
43 *
Tejun Heo0e9f0a42011-03-23 10:37:01 +010044 * Remove @child from the ptrace list, move it back to the original parent,
45 * and restore the execution state so that it conforms to the group stop
46 * state.
47 *
48 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
49 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
50 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
51 * If the ptracer is exiting, the ptracee can be in any state.
52 *
53 * After detach, the ptracee should be in a state which conforms to the
54 * group stop. If the group is stopped or in the process of stopping, the
55 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
56 * up from TASK_TRACED.
57 *
58 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
59 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
60 * to but in the opposite direction of what happens while attaching to a
61 * stopped task. However, in this direction, the intermediate RUNNING
62 * state is not hidden even from the current ptracer and if it immediately
63 * re-attaches and performs a WNOHANG wait(2), it may fail.
Tejun Heoe3bd0582011-03-23 10:37:01 +010064 *
65 * CONTEXT:
66 * write_lock_irq(tasklist_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -070067 */
Tejun Heoe3bd0582011-03-23 10:37:01 +010068void __ptrace_unlink(struct task_struct *child)
Linus Torvalds1da177e2005-04-16 15:20:36 -070069{
Tejun Heoe3bd0582011-03-23 10:37:01 +010070 BUG_ON(!child->ptrace);
71
72 child->ptrace = 0;
73 child->parent = child->real_parent;
74 list_del_init(&child->ptrace_entry);
75
Linus Torvalds1da177e2005-04-16 15:20:36 -070076 spin_lock(&child->sighand->siglock);
Tejun Heo0e9f0a42011-03-23 10:37:01 +010077
78 /*
79 * Reinstate GROUP_STOP_PENDING if group stop is in effect and
80 * @child isn't dead.
81 */
82 if (!(child->flags & PF_EXITING) &&
83 (child->signal->flags & SIGNAL_STOP_STOPPED ||
84 child->signal->group_stop_count))
85 child->group_stop |= GROUP_STOP_PENDING;
86
87 /*
88 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
89 * @child in the butt. Note that @resume should be used iff @child
90 * is in TASK_TRACED; otherwise, we might unduly disrupt
91 * TASK_KILLABLE sleeps.
92 */
93 if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
94 signal_wake_up(child, task_is_traced(child));
95
Linus Torvalds1da177e2005-04-16 15:20:36 -070096 spin_unlock(&child->sighand->siglock);
97}
98
99/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 * Check that we have indeed attached to the thing..
101 */
102int ptrace_check_attach(struct task_struct *child, int kill)
103{
104 int ret = -ESRCH;
105
106 /*
107 * We take the read lock around doing both checks to close a
108 * possible race where someone else was tracing our child and
109 * detached between these two checks. After this locked check,
110 * we are sure that this is our traced child and that can only
111 * be changed by us so it's not changing right after this.
112 */
113 read_lock(&tasklist_lock);
Oleg Nesterovc0c0b642008-02-08 04:19:00 -0800114 if ((child->ptrace & PT_PTRACED) && child->parent == current) {
Oleg Nesterovc0c0b642008-02-08 04:19:00 -0800115 /*
116 * child->sighand can't be NULL, release_task()
117 * does ptrace_unlink() before __exit_signal().
118 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119 spin_lock_irq(&child->sighand->siglock);
Oleg Nesterov321fb562011-04-01 20:13:01 +0200120 WARN_ON_ONCE(task_is_stopped(child));
121 if (task_is_traced(child) || kill)
122 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 spin_unlock_irq(&child->sighand->siglock);
124 }
125 read_unlock(&tasklist_lock);
126
Oleg Nesterovd9ae90a2008-02-06 01:36:13 -0800127 if (!ret && !kill)
Roland McGrath85ba2d82008-07-25 19:45:58 -0700128 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129
130 /* All systems go.. */
131 return ret;
132}
133
Stephen Smalley006ebb42008-05-19 08:32:49 -0400134int __ptrace_may_access(struct task_struct *task, unsigned int mode)
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700135{
David Howellsc69e8d92008-11-14 10:39:19 +1100136 const struct cred *cred = current_cred(), *tcred;
David Howellsb6dff3e2008-11-14 10:39:16 +1100137
Eric W. Biedermandf26c402006-06-26 00:25:59 -0700138 /* May we inspect the given task?
139 * This check is used both for attaching with ptrace
140 * and for allowing access to sensitive information in /proc.
141 *
142 * ptrace_attach denies several cases that /proc allows
143 * because setting up the necessary parent/child relationship
144 * or halting the specified task is impossible.
145 */
146 int dumpable = 0;
147 /* Don't let security modules deny introspection */
148 if (task == current)
149 return 0;
David Howellsc69e8d92008-11-14 10:39:19 +1100150 rcu_read_lock();
151 tcred = __task_cred(task);
152 if ((cred->uid != tcred->euid ||
153 cred->uid != tcred->suid ||
154 cred->uid != tcred->uid ||
155 cred->gid != tcred->egid ||
156 cred->gid != tcred->sgid ||
157 cred->gid != tcred->gid) &&
158 !capable(CAP_SYS_PTRACE)) {
159 rcu_read_unlock();
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700160 return -EPERM;
David Howellsc69e8d92008-11-14 10:39:19 +1100161 }
162 rcu_read_unlock();
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700163 smp_rmb();
Eric W. Biedermandf26c402006-06-26 00:25:59 -0700164 if (task->mm)
Kawai, Hidehiro6c5d5232007-07-19 01:48:27 -0700165 dumpable = get_dumpable(task->mm);
Eric W. Biedermandf26c402006-06-26 00:25:59 -0700166 if (!dumpable && !capable(CAP_SYS_PTRACE))
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700167 return -EPERM;
168
Ingo Molnar9e488582009-05-07 19:26:19 +1000169 return security_ptrace_access_check(task, mode);
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700170}
171
Stephen Smalley006ebb42008-05-19 08:32:49 -0400172bool ptrace_may_access(struct task_struct *task, unsigned int mode)
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700173{
174 int err;
175 task_lock(task);
Stephen Smalley006ebb42008-05-19 08:32:49 -0400176 err = __ptrace_may_access(task, mode);
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700177 task_unlock(task);
Roland McGrath3a709702009-04-07 23:21:06 -0700178 return !err;
Miklos Szerediab8d11b2005-09-06 15:18:24 -0700179}
180
Linus Torvaldse3e89cc2011-03-04 09:23:30 -0800181static int ptrace_attach(struct task_struct *task)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182{
Tejun Heod79fdd62011-03-23 10:37:00 +0100183 bool wait_trap = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184 int retval;
Linus Torvaldsf5b40e32006-05-07 10:49:33 -0700185
Al Viroa5cb0132007-03-20 13:58:35 -0400186 audit_ptrace(task);
187
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188 retval = -EPERM;
Oleg Nesterovb79b7ba2009-06-17 16:27:31 -0700189 if (unlikely(task->flags & PF_KTHREAD))
190 goto out;
Pavel Emelyanovbac0abd2007-10-18 23:40:18 -0700191 if (same_thread_group(task, current))
Linus Torvaldsf5b40e32006-05-07 10:49:33 -0700192 goto out;
193
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700194 /*
195 * Protect exec's credential calculations against our interference;
David Howells5e751e92009-05-08 13:55:22 +0100196 * interference; SUID, SGID and LSM creds get determined differently
197 * under ptrace.
David Howellsd84f4f92008-11-14 10:39:23 +1100198 */
Oleg Nesterov793285f2009-07-05 12:08:26 -0700199 retval = -ERESTARTNOINTR;
KOSAKI Motohiro9b1bf122010-10-27 15:34:08 -0700200 if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
David Howellsd84f4f92008-11-14 10:39:23 +1100201 goto out;
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700202
Linus Torvaldsf5b40e32006-05-07 10:49:33 -0700203 task_lock(task);
Stephen Smalley006ebb42008-05-19 08:32:49 -0400204 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700205 task_unlock(task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 if (retval)
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700207 goto unlock_creds;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700209 write_lock_irq(&tasklist_lock);
Oleg Nesterovb79b7ba2009-06-17 16:27:31 -0700210 retval = -EPERM;
211 if (unlikely(task->exit_state))
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700212 goto unlock_tasklist;
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700213 if (task->ptrace)
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700214 goto unlock_tasklist;
Oleg Nesterovb79b7ba2009-06-17 16:27:31 -0700215
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700216 task->ptrace = PT_PTRACED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217 if (capable(CAP_SYS_PTRACE))
218 task->ptrace |= PT_PTRACE_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 __ptrace_link(task, current);
Oleg Nesterov33e9fc72008-04-30 00:53:14 -0700221 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
Oleg Nesterovb79b7ba2009-06-17 16:27:31 -0700222
Tejun Heod79fdd62011-03-23 10:37:00 +0100223 spin_lock(&task->sighand->siglock);
224
225 /*
226 * If the task is already STOPPED, set GROUP_STOP_PENDING and
227 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
228 * will be cleared if the child completes the transition or any
229 * event which clears the group stop states happens. We'll wait
230 * for the transition to complete before returning from this
231 * function.
232 *
233 * This hides STOPPED -> RUNNING -> TRACED transition from the
234 * attaching thread but a different thread in the same group can
235 * still observe the transient RUNNING state. IOW, if another
236 * thread's WNOHANG wait(2) on the stopped tracee races against
237 * ATTACH, the wait(2) may fail due to the transient RUNNING.
238 *
239 * The following task_is_stopped() test is safe as both transitions
240 * in and out of STOPPED are protected by siglock.
241 */
242 if (task_is_stopped(task)) {
243 task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
244 signal_wake_up(task, 1);
245 wait_trap = true;
246 }
247
248 spin_unlock(&task->sighand->siglock);
249
Oleg Nesterovb79b7ba2009-06-17 16:27:31 -0700250 retval = 0;
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700251unlock_tasklist:
252 write_unlock_irq(&tasklist_lock);
253unlock_creds:
KOSAKI Motohiro9b1bf122010-10-27 15:34:08 -0700254 mutex_unlock(&task->signal->cred_guard_mutex);
Linus Torvaldsf5b40e32006-05-07 10:49:33 -0700255out:
Tejun Heod79fdd62011-03-23 10:37:00 +0100256 if (wait_trap)
257 wait_event(current->signal->wait_chldexit,
258 !(task->group_stop & GROUP_STOP_TRAPPING));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259 return retval;
260}
261
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700262/**
263 * ptrace_traceme -- helper for PTRACE_TRACEME
264 *
265 * Performs checks and sets PT_PTRACED.
266 * Should be used by all ptrace implementations for PTRACE_TRACEME.
267 */
Linus Torvaldse3e89cc2011-03-04 09:23:30 -0800268static int ptrace_traceme(void)
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700269{
270 int ret = -EPERM;
271
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700272 write_lock_irq(&tasklist_lock);
273 /* Are we already being traced? */
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700274 if (!current->ptrace) {
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700275 ret = security_ptrace_traceme(current->parent);
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700276 /*
277 * Check PF_EXITING to ensure ->real_parent has not passed
278 * exit_ptrace(). Otherwise we don't report the error but
279 * pretend ->real_parent untraces us right after return.
280 */
281 if (!ret && !(current->real_parent->flags & PF_EXITING)) {
282 current->ptrace = PT_PTRACED;
283 __ptrace_link(current, current->real_parent);
284 }
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700285 }
Oleg Nesterov4b105cb2009-06-17 16:27:33 -0700286 write_unlock_irq(&tasklist_lock);
287
Oleg Nesterovf2f0b002009-06-17 16:27:32 -0700288 return ret;
289}
290
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700291/*
292 * Called with irqs disabled, returns true if childs should reap themselves.
293 */
294static int ignoring_children(struct sighand_struct *sigh)
295{
296 int ret;
297 spin_lock(&sigh->siglock);
298 ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
299 (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
300 spin_unlock(&sigh->siglock);
301 return ret;
302}
303
304/*
305 * Called with tasklist_lock held for writing.
306 * Unlink a traced task, and clean it up if it was a traced zombie.
307 * Return true if it needs to be reaped with release_task().
308 * (We can't call release_task() here because we already hold tasklist_lock.)
309 *
310 * If it's a zombie, our attachedness prevented normal parent notification
311 * or self-reaping. Do notification now if it would have happened earlier.
312 * If it should reap itself, return true.
313 *
Oleg Nesterova7f07652009-09-23 15:56:44 -0700314 * If it's our own child, there is no notification to do. But if our normal
315 * children self-reap, then this child was prevented by ptrace and we must
316 * reap it now, in that case we must also wake up sub-threads sleeping in
317 * do_wait().
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700318 */
319static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
320{
321 __ptrace_unlink(p);
322
323 if (p->exit_state == EXIT_ZOMBIE) {
324 if (!task_detached(p) && thread_group_empty(p)) {
325 if (!same_thread_group(p->real_parent, tracer))
326 do_notify_parent(p, p->exit_signal);
Oleg Nesterova7f07652009-09-23 15:56:44 -0700327 else if (ignoring_children(tracer->sighand)) {
328 __wake_up_parent(p, tracer);
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700329 p->exit_signal = -1;
Oleg Nesterova7f07652009-09-23 15:56:44 -0700330 }
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700331 }
332 if (task_detached(p)) {
333 /* Mark it as in the process of being reaped. */
334 p->exit_state = EXIT_DEAD;
335 return true;
336 }
337 }
338
339 return false;
340}
341
Linus Torvaldse3e89cc2011-03-04 09:23:30 -0800342static int ptrace_detach(struct task_struct *child, unsigned int data)
Oleg Nesterov5ecfbae2006-02-15 22:50:10 +0300343{
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700344 bool dead = false;
Oleg Nesterov45761452009-04-02 16:58:14 -0700345
Oleg Nesterov5ecfbae2006-02-15 22:50:10 +0300346 if (!valid_signal(data))
347 return -EIO;
348
349 /* Architecture-specific hardware disable .. */
350 ptrace_disable(child);
Roland McGrath7d941432007-09-05 03:05:56 -0700351 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
Oleg Nesterov5ecfbae2006-02-15 22:50:10 +0300352
Oleg Nesterov95c3eb72009-04-02 16:58:11 -0700353 write_lock_irq(&tasklist_lock);
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700354 /*
355 * This child can be already killed. Make sure de_thread() or
356 * our sub-thread doing do_wait() didn't do release_task() yet.
357 */
Oleg Nesterov95c3eb72009-04-02 16:58:11 -0700358 if (child->ptrace) {
359 child->exit_code = data;
Oleg Nesterov45761452009-04-02 16:58:14 -0700360 dead = __ptrace_detach(current, child);
Oleg Nesterov95c3eb72009-04-02 16:58:11 -0700361 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362 write_unlock_irq(&tasklist_lock);
363
Oleg Nesterov45761452009-04-02 16:58:14 -0700364 if (unlikely(dead))
365 release_task(child);
366
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367 return 0;
368}
369
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700370/*
Oleg Nesterovc7e49c12010-08-10 18:03:07 -0700371 * Detach all tasks we were using ptrace on. Called with tasklist held
372 * for writing, and returns with it held too. But note it can release
373 * and reacquire the lock.
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700374 */
375void exit_ptrace(struct task_struct *tracer)
Namhyung Kimc4b5ed22010-10-27 15:33:44 -0700376 __releases(&tasklist_lock)
377 __acquires(&tasklist_lock)
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700378{
379 struct task_struct *p, *n;
380 LIST_HEAD(ptrace_dead);
381
Oleg Nesterovc7e49c12010-08-10 18:03:07 -0700382 if (likely(list_empty(&tracer->ptraced)))
383 return;
384
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700385 list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
386 if (__ptrace_detach(tracer, p))
387 list_add(&p->ptrace_entry, &ptrace_dead);
388 }
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700389
Oleg Nesterovc7e49c12010-08-10 18:03:07 -0700390 write_unlock_irq(&tasklist_lock);
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700391 BUG_ON(!list_empty(&tracer->ptraced));
392
393 list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
394 list_del_init(&p->ptrace_entry);
395 release_task(p);
396 }
Oleg Nesterovc7e49c12010-08-10 18:03:07 -0700397
398 write_lock_irq(&tasklist_lock);
Oleg Nesterov39c626a2009-04-02 16:58:18 -0700399}
400
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
402{
403 int copied = 0;
404
405 while (len > 0) {
406 char buf[128];
407 int this_len, retval;
408
409 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
410 retval = access_process_vm(tsk, src, buf, this_len, 0);
411 if (!retval) {
412 if (copied)
413 break;
414 return -EIO;
415 }
416 if (copy_to_user(dst, buf, retval))
417 return -EFAULT;
418 copied += retval;
419 src += retval;
420 dst += retval;
Roland McGrath3a709702009-04-07 23:21:06 -0700421 len -= retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 }
423 return copied;
424}
425
426int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
427{
428 int copied = 0;
429
430 while (len > 0) {
431 char buf[128];
432 int this_len, retval;
433
434 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
435 if (copy_from_user(buf, src, this_len))
436 return -EFAULT;
437 retval = access_process_vm(tsk, dst, buf, this_len, 1);
438 if (!retval) {
439 if (copied)
440 break;
441 return -EIO;
442 }
443 copied += retval;
444 src += retval;
445 dst += retval;
Roland McGrath3a709702009-04-07 23:21:06 -0700446 len -= retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447 }
448 return copied;
449}
450
Namhyung Kim4abf9862010-10-27 15:33:45 -0700451static int ptrace_setoptions(struct task_struct *child, unsigned long data)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452{
453 child->ptrace &= ~PT_TRACE_MASK;
454
455 if (data & PTRACE_O_TRACESYSGOOD)
456 child->ptrace |= PT_TRACESYSGOOD;
457
458 if (data & PTRACE_O_TRACEFORK)
459 child->ptrace |= PT_TRACE_FORK;
460
461 if (data & PTRACE_O_TRACEVFORK)
462 child->ptrace |= PT_TRACE_VFORK;
463
464 if (data & PTRACE_O_TRACECLONE)
465 child->ptrace |= PT_TRACE_CLONE;
466
467 if (data & PTRACE_O_TRACEEXEC)
468 child->ptrace |= PT_TRACE_EXEC;
469
470 if (data & PTRACE_O_TRACEVFORKDONE)
471 child->ptrace |= PT_TRACE_VFORK_DONE;
472
473 if (data & PTRACE_O_TRACEEXIT)
474 child->ptrace |= PT_TRACE_EXIT;
475
476 return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
477}
478
Roland McGrathe16b2782008-04-20 13:10:12 -0700479static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480{
Oleg Nesterove4961252009-06-17 16:27:36 -0700481 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482 int error = -ESRCH;
483
Oleg Nesterove4961252009-06-17 16:27:36 -0700484 if (lock_task_sighand(child, &flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485 error = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486 if (likely(child->last_siginfo != NULL)) {
Roland McGrathe16b2782008-04-20 13:10:12 -0700487 *info = *child->last_siginfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700488 error = 0;
489 }
Oleg Nesterove4961252009-06-17 16:27:36 -0700490 unlock_task_sighand(child, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 return error;
493}
494
Roland McGrathe16b2782008-04-20 13:10:12 -0700495static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496{
Oleg Nesterove4961252009-06-17 16:27:36 -0700497 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498 int error = -ESRCH;
499
Oleg Nesterove4961252009-06-17 16:27:36 -0700500 if (lock_task_sighand(child, &flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 error = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700502 if (likely(child->last_siginfo != NULL)) {
Roland McGrathe16b2782008-04-20 13:10:12 -0700503 *child->last_siginfo = *info;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700504 error = 0;
505 }
Oleg Nesterove4961252009-06-17 16:27:36 -0700506 unlock_task_sighand(child, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 return error;
509}
510
Roland McGrath36df29d2008-01-30 13:30:51 +0100511
/*
 * Not every architecture defines the same set of stepping requests;
 * these predicates collapse to 0 where a request does not exist, so
 * ptrace_resume() can test them unconditionally.
 */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
529
Namhyung Kim4abf9862010-10-27 15:33:45 -0700530static int ptrace_resume(struct task_struct *child, long request,
531 unsigned long data)
Roland McGrath36df29d2008-01-30 13:30:51 +0100532{
533 if (!valid_signal(data))
534 return -EIO;
535
536 if (request == PTRACE_SYSCALL)
537 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
538 else
539 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
540
541#ifdef TIF_SYSCALL_EMU
542 if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
543 set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
544 else
545 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
546#endif
547
Roland McGrath5b88abb2008-01-30 13:30:53 +0100548 if (is_singleblock(request)) {
549 if (unlikely(!arch_has_block_step()))
550 return -EIO;
551 user_enable_block_step(child);
552 } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
Roland McGrath36df29d2008-01-30 13:30:51 +0100553 if (unlikely(!arch_has_single_step()))
554 return -EIO;
555 user_enable_single_step(child);
Roland McGrath3a709702009-04-07 23:21:06 -0700556 } else {
Roland McGrath36df29d2008-01-30 13:30:51 +0100557 user_disable_single_step(child);
Roland McGrath3a709702009-04-07 23:21:06 -0700558 }
Roland McGrath36df29d2008-01-30 13:30:51 +0100559
560 child->exit_code = data;
561 wake_up_process(child);
562
563 return 0;
564}
565
Suresh Siddha2225a122010-02-11 11:51:00 -0800566#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
567
568static const struct user_regset *
569find_regset(const struct user_regset_view *view, unsigned int type)
570{
571 const struct user_regset *regset;
572 int n;
573
574 for (n = 0; n < view->n; ++n) {
575 regset = view->regsets + n;
576 if (regset->core_note_type == type)
577 return regset;
578 }
579
580 return NULL;
581}
582
583static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
584 struct iovec *kiov)
585{
586 const struct user_regset_view *view = task_user_regset_view(task);
587 const struct user_regset *regset = find_regset(view, type);
588 int regset_no;
589
590 if (!regset || (kiov->iov_len % regset->size) != 0)
Suresh Siddhac6a0dd72010-02-22 14:51:32 -0800591 return -EINVAL;
Suresh Siddha2225a122010-02-11 11:51:00 -0800592
593 regset_no = regset - view->regsets;
594 kiov->iov_len = min(kiov->iov_len,
595 (__kernel_size_t) (regset->n * regset->size));
596
597 if (req == PTRACE_GETREGSET)
598 return copy_regset_to_user(task, view, regset_no, 0,
599 kiov->iov_len, kiov->iov_base);
600 else
601 return copy_regset_from_user(task, view, regset_no, 0,
602 kiov->iov_len, kiov->iov_base);
603}
604
605#endif
606
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607int ptrace_request(struct task_struct *child, long request,
Namhyung Kim4abf9862010-10-27 15:33:45 -0700608 unsigned long addr, unsigned long data)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609{
610 int ret = -EIO;
Roland McGrathe16b2782008-04-20 13:10:12 -0700611 siginfo_t siginfo;
Namhyung Kim9fed81d2010-10-27 15:33:46 -0700612 void __user *datavp = (void __user *) data;
613 unsigned long __user *datalp = datavp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700614
615 switch (request) {
Roland McGrath16c3e382008-01-30 13:31:47 +0100616 case PTRACE_PEEKTEXT:
617 case PTRACE_PEEKDATA:
618 return generic_ptrace_peekdata(child, addr, data);
619 case PTRACE_POKETEXT:
620 case PTRACE_POKEDATA:
621 return generic_ptrace_pokedata(child, addr, data);
622
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623#ifdef PTRACE_OLDSETOPTIONS
624 case PTRACE_OLDSETOPTIONS:
625#endif
626 case PTRACE_SETOPTIONS:
627 ret = ptrace_setoptions(child, data);
628 break;
629 case PTRACE_GETEVENTMSG:
Namhyung Kim9fed81d2010-10-27 15:33:46 -0700630 ret = put_user(child->ptrace_message, datalp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 break;
Roland McGrathe16b2782008-04-20 13:10:12 -0700632
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633 case PTRACE_GETSIGINFO:
Roland McGrathe16b2782008-04-20 13:10:12 -0700634 ret = ptrace_getsiginfo(child, &siginfo);
635 if (!ret)
Namhyung Kim9fed81d2010-10-27 15:33:46 -0700636 ret = copy_siginfo_to_user(datavp, &siginfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637 break;
Roland McGrathe16b2782008-04-20 13:10:12 -0700638
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639 case PTRACE_SETSIGINFO:
Namhyung Kim9fed81d2010-10-27 15:33:46 -0700640 if (copy_from_user(&siginfo, datavp, sizeof siginfo))
Roland McGrathe16b2782008-04-20 13:10:12 -0700641 ret = -EFAULT;
642 else
643 ret = ptrace_setsiginfo(child, &siginfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644 break;
Roland McGrathe16b2782008-04-20 13:10:12 -0700645
Alexey Dobriyan1bcf5482007-10-16 01:23:45 -0700646 case PTRACE_DETACH: /* detach a process that was attached. */
647 ret = ptrace_detach(child, data);
648 break;
Roland McGrath36df29d2008-01-30 13:30:51 +0100649
Mike Frysinger9c1a1252010-05-26 14:42:52 -0700650#ifdef CONFIG_BINFMT_ELF_FDPIC
651 case PTRACE_GETFDPIC: {
Oleg Nesterove0129ef2010-05-26 14:42:53 -0700652 struct mm_struct *mm = get_task_mm(child);
Mike Frysinger9c1a1252010-05-26 14:42:52 -0700653 unsigned long tmp = 0;
654
Oleg Nesterove0129ef2010-05-26 14:42:53 -0700655 ret = -ESRCH;
656 if (!mm)
657 break;
658
Mike Frysinger9c1a1252010-05-26 14:42:52 -0700659 switch (addr) {
660 case PTRACE_GETFDPIC_EXEC:
Oleg Nesterove0129ef2010-05-26 14:42:53 -0700661 tmp = mm->context.exec_fdpic_loadmap;
Mike Frysinger9c1a1252010-05-26 14:42:52 -0700662 break;
663 case PTRACE_GETFDPIC_INTERP:
Oleg Nesterove0129ef2010-05-26 14:42:53 -0700664 tmp = mm->context.interp_fdpic_loadmap;
Mike Frysinger9c1a1252010-05-26 14:42:52 -0700665 break;
666 default:
667 break;
668 }
Oleg Nesterove0129ef2010-05-26 14:42:53 -0700669 mmput(mm);
Mike Frysinger9c1a1252010-05-26 14:42:52 -0700670
Namhyung Kim9fed81d2010-10-27 15:33:46 -0700671 ret = put_user(tmp, datalp);
Mike Frysinger9c1a1252010-05-26 14:42:52 -0700672 break;
673 }
674#endif
675
Roland McGrath36df29d2008-01-30 13:30:51 +0100676#ifdef PTRACE_SINGLESTEP
677 case PTRACE_SINGLESTEP:
678#endif
Roland McGrath5b88abb2008-01-30 13:30:53 +0100679#ifdef PTRACE_SINGLEBLOCK
680 case PTRACE_SINGLEBLOCK:
681#endif
Roland McGrath36df29d2008-01-30 13:30:51 +0100682#ifdef PTRACE_SYSEMU
683 case PTRACE_SYSEMU:
684 case PTRACE_SYSEMU_SINGLESTEP:
685#endif
686 case PTRACE_SYSCALL:
687 case PTRACE_CONT:
688 return ptrace_resume(child, request, data);
689
690 case PTRACE_KILL:
691 if (child->exit_state) /* already dead */
692 return 0;
693 return ptrace_resume(child, request, SIGKILL);
694
Suresh Siddha2225a122010-02-11 11:51:00 -0800695#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
696 case PTRACE_GETREGSET:
697 case PTRACE_SETREGSET:
698 {
699 struct iovec kiov;
Namhyung Kim9fed81d2010-10-27 15:33:46 -0700700 struct iovec __user *uiov = datavp;
Suresh Siddha2225a122010-02-11 11:51:00 -0800701
702 if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
703 return -EFAULT;
704
705 if (__get_user(kiov.iov_base, &uiov->iov_base) ||
706 __get_user(kiov.iov_len, &uiov->iov_len))
707 return -EFAULT;
708
709 ret = ptrace_regset(child, request, addr, &kiov);
710 if (!ret)
711 ret = __put_user(kiov.iov_len, &uiov->iov_len);
712 break;
713 }
714#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715 default:
716 break;
717 }
718
719 return ret;
720}
Christoph Hellwig481bed42005-11-07 00:59:47 -0800721
Oleg Nesterov8053bdd2009-06-17 16:27:34 -0700722static struct task_struct *ptrace_get_task_struct(pid_t pid)
Christoph Hellwig6b9c7ed2006-01-08 01:02:33 -0800723{
724 struct task_struct *child;
Christoph Hellwig481bed42005-11-07 00:59:47 -0800725
Oleg Nesterov8053bdd2009-06-17 16:27:34 -0700726 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -0700727 child = find_task_by_vpid(pid);
Christoph Hellwig481bed42005-11-07 00:59:47 -0800728 if (child)
729 get_task_struct(child);
Oleg Nesterov8053bdd2009-06-17 16:27:34 -0700730 rcu_read_unlock();
Sukadev Bhattiproluf400e192006-09-29 02:00:07 -0700731
Christoph Hellwig481bed42005-11-07 00:59:47 -0800732 if (!child)
Christoph Hellwig6b9c7ed2006-01-08 01:02:33 -0800733 return ERR_PTR(-ESRCH);
734 return child;
Christoph Hellwig481bed42005-11-07 00:59:47 -0800735}
736
/* Fallback: most architectures need no extra book-keeping after attach. */
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif
740
Namhyung Kim4abf9862010-10-27 15:33:45 -0700741SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
742 unsigned long, data)
Christoph Hellwig481bed42005-11-07 00:59:47 -0800743{
744 struct task_struct *child;
745 long ret;
746
Christoph Hellwig6b9c7ed2006-01-08 01:02:33 -0800747 if (request == PTRACE_TRACEME) {
748 ret = ptrace_traceme();
Haavard Skinnemoen6ea6dd92007-11-27 13:02:40 +0100749 if (!ret)
750 arch_ptrace_attach(current);
Christoph Hellwig481bed42005-11-07 00:59:47 -0800751 goto out;
Christoph Hellwig6b9c7ed2006-01-08 01:02:33 -0800752 }
753
754 child = ptrace_get_task_struct(pid);
755 if (IS_ERR(child)) {
756 ret = PTR_ERR(child);
757 goto out;
758 }
Christoph Hellwig481bed42005-11-07 00:59:47 -0800759
760 if (request == PTRACE_ATTACH) {
761 ret = ptrace_attach(child);
Christoph Hellwig0ac15552007-10-16 01:26:37 -0700762 /*
763 * Some architectures need to do book-keeping after
764 * a ptrace attach.
765 */
766 if (!ret)
767 arch_ptrace_attach(child);
Christoph Hellwig005f18d2005-11-13 16:06:33 -0800768 goto out_put_task_struct;
Christoph Hellwig481bed42005-11-07 00:59:47 -0800769 }
770
771 ret = ptrace_check_attach(child, request == PTRACE_KILL);
772 if (ret < 0)
773 goto out_put_task_struct;
774
775 ret = arch_ptrace(child, request, addr, data);
Christoph Hellwig481bed42005-11-07 00:59:47 -0800776
777 out_put_task_struct:
778 put_task_struct(child);
779 out:
Christoph Hellwig481bed42005-11-07 00:59:47 -0800780 return ret;
781}
Alexey Dobriyan76647322007-07-17 04:03:43 -0700782
Namhyung Kim4abf9862010-10-27 15:33:45 -0700783int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
784 unsigned long data)
Alexey Dobriyan76647322007-07-17 04:03:43 -0700785{
786 unsigned long tmp;
787 int copied;
788
789 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
790 if (copied != sizeof(tmp))
791 return -EIO;
792 return put_user(tmp, (unsigned long __user *)data);
793}
Alexey Dobriyanf284ce72007-07-17 04:03:44 -0700794
Namhyung Kim4abf9862010-10-27 15:33:45 -0700795int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
796 unsigned long data)
Alexey Dobriyanf284ce72007-07-17 04:03:44 -0700797{
798 int copied;
799
800 copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
801 return (copied == sizeof(data)) ? 0 : -EIO;
802}
Roland McGrath032d82d2008-01-30 13:31:47 +0100803
Christoph Hellwig96b89362008-11-25 08:10:03 +0100804#if defined CONFIG_COMPAT
Roland McGrath032d82d2008-01-30 13:31:47 +0100805#include <linux/compat.h>
806
/*
 * Handle the compat (32-bit tracer on a 64-bit kernel) variants of the
 * generic ptrace requests, translating between compat-sized user data
 * and native kernel types.  Requests not handled here fall through to
 * the native ptrace_request().
 *
 * Returns 0 on success or a negative errno.
 */
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* Read one compat-sized word from the tracee, copy to user. */
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		/* @data itself is the word to write into the tracee. */
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		/* Truncation to compat_ulong_t is the compat contract. */
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		/* Zero first so fields absent from the compat layout are clean. */
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		/*
		 * Userspace hands us a compat_iovec; convert it to a
		 * native iovec for ptrace_regset(), then write back the
		 * actual length transferred.
		 */
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
Roland McGrathc269f192008-01-30 13:31:48 +0100884
Roland McGrathc269f192008-01-30 13:31:48 +0100885asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
886 compat_long_t addr, compat_long_t data)
887{
888 struct task_struct *child;
889 long ret;
890
Roland McGrathc269f192008-01-30 13:31:48 +0100891 if (request == PTRACE_TRACEME) {
892 ret = ptrace_traceme();
893 goto out;
894 }
895
896 child = ptrace_get_task_struct(pid);
897 if (IS_ERR(child)) {
898 ret = PTR_ERR(child);
899 goto out;
900 }
901
902 if (request == PTRACE_ATTACH) {
903 ret = ptrace_attach(child);
904 /*
905 * Some architectures need to do book-keeping after
906 * a ptrace attach.
907 */
908 if (!ret)
909 arch_ptrace_attach(child);
910 goto out_put_task_struct;
911 }
912
913 ret = ptrace_check_attach(child, request == PTRACE_KILL);
914 if (!ret)
915 ret = compat_arch_ptrace(child, request, addr, data);
916
917 out_put_task_struct:
918 put_task_struct(child);
919 out:
Roland McGrathc269f192008-01-30 13:31:48 +0100920 return ret;
921}
Christoph Hellwig96b89362008-11-25 08:10:03 +0100922#endif /* CONFIG_COMPAT */