/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}
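
/*
 * Illustrative userspace sketch (not kernel code; pid/addr are assumed
 * values): a tracer's word read reaches ptrace_access_vm() above via
 * PTRACE_PEEKDATA and generic_ptrace_peekdata() later in this file:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 */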

void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	rcu_read_lock();
	__ptrace_link(child, new_parent, __task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop. If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task. However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING. TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt. Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
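
/*
 * Note on the trick above: TASK_TRACED is defined in <linux/sched.h> as
 * (TASK_WAKEKILL | __TASK_TRACED). Dropping the WAKEKILL bit under siglock
 * means a subsequently arriving fatal signal can no longer wake the tracee
 * out of the trap; it stays quiescent until ptrace_unfreeze_traced() below
 * restores TASK_TRACED (or wakes it, if a fatal signal became pending in
 * the meantime).
 */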

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations. If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing. If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_SCHED)
		return false;

	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	if (mode & PTRACE_MODE_SCHED)
		return 0;
	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
{
	return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
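
/*
 * Informational note (callers live outside this file): per the FSCREDS vs.
 * REALCREDS split enforced above, procfs-style readers typically pass a
 * PTRACE_MODE_*_FSCREDS mode, while attach paths such as ptrace_attach()
 * below pass PTRACE_MODE_ATTACH_REALCREDS.
 */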

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens. We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state. IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
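
/*
 * Illustrative userspace sketch (not kernel code) of the two attach
 * flavours handled above. PTRACE_ATTACH stops the tracee with SIGSTOP;
 * PTRACE_SEIZE leaves it running and takes its options up front:
 *
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);		// wait for the stop
 *
 *	ptrace(PTRACE_SEIZE, pid, 0, PTRACE_O_TRACEEXIT);
 */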

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
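
/*
 * Illustrative userspace sketch (not kernel code): the canonical
 * PTRACE_TRACEME pattern in a child about to exec, which this helper
 * serves; argv is an assumed name:
 *
 *	if (fork() == 0) {
 *		ptrace(PTRACE_TRACEME, 0, 0, 0);
 *		execvp(argv[1], &argv[1]);	// parent sees a trap at exec
 *		_exit(127);
 *	}
 */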

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now; in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist_lock held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
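
/*
 * Illustrative userspace sketch (not kernel code): options arrive here as
 * the raw PTRACE_O_* mask and are stored shifted by PT_OPT_FLAG_SHIFT:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXEC);
 */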

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
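
/*
 * Illustrative userspace sketch (not kernel code): at a signal-delivery
 * stop a tracer can inspect and rewrite child->last_siginfo through the
 * two helpers above; the SIGTERM edit is an assumed demo value:
 *
 *	siginfo_t si;
 *	ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
 *	si.si_signo = SIGTERM;
 *	ptrace(PTRACE_SETSIGINFO, pid, 0, &si);
 */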

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
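
/*
 * Illustrative userspace sketch (not kernel code): PTRACE_PEEKSIGINFO
 * takes a struct ptrace_peeksiginfo_args in addr and an output buffer in
 * data, and returns the number of siginfo entries copied:
 *
 *	struct ptrace_peeksiginfo_args arg = {
 *		.off = 0, .flags = 0, .nr = 16,
 *	};
 *	siginfo_t buf[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &arg, buf);
 */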

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif
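
/*
 * Illustrative userspace sketch (not kernel code): PTRACE_GETREGSET with
 * NT_PRSTATUS is the modern replacement for PTRACE_GETREGS and lands in
 * ptrace_regset() above; user_regs_struct is the arch-specific layout
 * (shown here as on x86):
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */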

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control. At least one trap is guaranteed to happen
		 * after this request. If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception. If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events. Tracee must be in STOP. It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2). If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again. Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
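	/*
	 * Note on the condition below (explanatory, inferred from the code):
	 * on a successful PTRACE_DETACH the tracee was already untraced and
	 * woken by __ptrace_unlink(), so it must not be unfrozen again here.
	 */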
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */