/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

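/*
 * Illustrative sketch, not part of the original file: whatever the word
 * count, the test above is a word-wise "pending & ~blocked", so a
 * signal that is pending but blocked never makes a task ready:
 *
 *	sigset_t pending, blocked;
 *
 *	siginitset(&pending, sigmask(SIGINT));
 *	siginitset(&blocked, sigmask(SIGINT));
 *	WARN_ON(has_pending_signals(&pending, &blocked)); // not ready
 */
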
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

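/*
 * Illustrative sketch, not part of the original file: the first-word
 * narrowing above makes synchronous signals win.  With both SIGTERM
 * and SIGSEGV pending and nothing blocked:
 *
 *	struct sigpending pending;
 *	sigset_t blocked;
 *
 *	sigemptyset(&blocked);
 *	sigemptyset(&pending.signal);
 *	INIT_LIST_HEAD(&pending.list);
 *	sigaddset(&pending.signal, SIGTERM);
 *	sigaddset(&pending.signal, SIGSEGV);
 *	// next_signal(&pending, &blocked) == SIGSEGV: it is in
 *	// SYNCHRONOUS_MASK, so SIGTERM is masked out of the first word.
 */
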
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

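/*
 * Illustrative sketch, not part of the original file: a typical caller
 * ORs the stop signo into the low JOBCTL_STOP_SIGMASK bits and only
 * wakes the task if the bits were actually set:
 *
 *	spin_lock_irq(&t->sighand->siglock);
 *	if (task_set_jobctl_pending(t, signr | JOBCTL_STOP_PENDING |
 *				       JOBCTL_STOP_CONSUME))
 *		signal_wake_up(t, 0);
 *	spin_unlock_irq(&t->sighand->siglock);
 */
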
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read the comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

/*
 * Allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials.  This can go away when all
	 * callers hold the rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

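/*
 * Illustrative sketch, not part of the original file: every successful
 * __sigqueue_alloc() charges user->sigpending and the matching
 * __sigqueue_free() uncharges it.  SIGQUEUE_PREALLOC entries are
 * skipped above because sigqueue_free() drops that flag before the
 * final free:
 *
 *	struct sigqueue *q = __sigqueue_alloc(sig, t, GFP_ATOMIC, 0);
 *
 *	if (q) {
 *		// ... queue q on a sigpending list, deliver, then ...
 *		__sigqueue_free(q);	// uncharges and frees
 *	}
 */
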
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/*
	 * We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case.  This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once.  Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

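/*
 * Illustrative sketch, not part of the original file: callers dequeue
 * under siglock and treat a return of 0 as "nothing deliverable",
 * roughly:
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */
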
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

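/*
 * Illustrative note, not part of the original file: the pointer
 * comparison in is_si_special() works because the special tokens are
 * tiny integer cookies cast to pointers (in <linux/sched.h> of this
 * era):
 *
 *	#define SEND_SIG_NOINFO	((struct siginfo *) 0)
 *	#define SEND_SIG_PRIV	((struct siginfo *) 1)
 *	#define SEND_SIG_FORCED	((struct siginfo *) 2)
 *
 * so "info <= SEND_SIG_FORCED" is true exactly for these three values
 * and false for any real siginfo pointer.
 */
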
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let the audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL.  The
			 * task was unhashed; the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

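/*
 * Illustrative sketch, not part of the original file: the SIGCONT
 * carve-out above lets a job-control shell continue a stopped job in
 * its own session even when the uids no longer match:
 *
 *	kill(job_pid, SIGCONT);	// ok if same session as the caller
 *	kill(job_pid, SIGTERM);	// -EPERM without matching creds/CAP_KILL
 */
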
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals.  Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

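/*
 * Illustrative note, not part of the original file: legacy_queue() is
 * why classic signals coalesce while real-time signals accumulate.
 * From userspace, with the target not yet scheduled:
 *
 *	kill(pid, SIGUSR1);
 *	kill(pid, SIGUSR1);	// coalesced: already pending, sig < SIGRTMIN
 *	kill(pid, SIGRTMIN);
 *	kill(pid, SIGRTMIN);	// queued a second time
 */
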
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			 int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example;
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand may already have been freed and even reused,
		 * but we rely on SLAB_DESTROY_BY_RCU and sighand_ctor(),
		 * which initializes ->siglock: this slab can't go away, it
		 * has the same object type, and ->siglock can't be
		 * reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock; we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

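/*
 * Illustrative sketch, not part of the original file: users go through
 * the lock_task_sighand()/unlock_task_sighand() wrappers and must
 * handle the NULL case, as do_send_sig_info() above does:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		// p->sighand is pinned and ->siglock is held here
 *		unlock_task_sighand(p, &flags);
 *	} else {
 *		// p is past __exit_signal(); treat it as -ESRCH
 *	}
 */
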
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between; try again.  If it
		 * is dead, pid_task() will return NULL; if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

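/*
 * Illustrative note, not part of the original file: the pid argument
 * follows the kill(2) conventions handled above:
 *
 *	kill(1234, sig);	// pid > 0: exactly that process
 *	kill(0, sig);		// pid == 0: the caller's process group
 *	kill(-1, sig);		// every process we may signal, minus init
 *	kill(-5678, sig);	// pid < -1: process group 5678
 */
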
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure in timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

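/*
 * Illustrative sketch, not part of the original file: the POSIX timer
 * code pairs these helpers roughly as follows, so delivery at expiry
 * time can never fail for lack of memory:
 *
 *	// timer_create(): reserve the queue entry up front
 *	struct sigqueue *q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;
 *
 *	// timer expiry: reuse the preallocated entry
 *	q->info.si_code = SI_TIMER;
 *	send_sigqueue(q, target, group);
 *
 *	// timer_delete(): drop the reservation
 *	sigqueue_free(q);
 */
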
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable(), which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

1788 | static inline int may_ptrace_stop(void) |
1789 | { |
1790 | if (!likely(current->ptrace)) |
1791 | return 0; |
1792 | /* |
1793 | * Are we in the middle of do_coredump? |
1794 | * If so, and our tracer is also part of the coredump, stopping
1795 | * is a deadlock situation and pointless because our tracer
1796 | * is dead, so don't allow us to stop.
1797 | * If SIGKILL was already sent before the caller unlocked |
1798 | * ->siglock we must see ->core_state != NULL. Otherwise it |
1799 | * is safe to enter schedule(). |
1800 | * |
1801 | * This is almost outdated; a task with a pending SIGKILL can't
1802 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported |
1803 | * after SIGKILL was already dequeued. |
1804 | */ |
1805 | if (unlikely(current->mm->core_state) && |
1806 | unlikely(current->mm == current->parent->mm)) |
1807 | return 0; |
1808 | |
1809 | return 1; |
1810 | } |
1811 | |
1812 | /* |
1813 | * Return non-zero if there is a SIGKILL that should be waking us up. |
1814 | * Called with the siglock held. |
1815 | */ |
1816 | static int sigkill_pending(struct task_struct *tsk) |
1817 | { |
1818 | return sigismember(&tsk->pending.signal, SIGKILL) || |
1819 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); |
1820 | } |
1821 | |
1822 | /* |
1823 | * This must be called with current->sighand->siglock held. |
1824 | * |
1825 | * This should be the path for all ptrace stops. |
1826 | * We always set current->last_siginfo while stopped here. |
1827 | * That makes it a way to test a stopped process for |
1828 | * being ptrace-stopped vs being job-control-stopped. |
1829 | * |
1830 | * If we actually decide not to stop at all because the tracer |
1831 | * is gone, we keep current->exit_code unless clear_code. |
1832 | */ |
1833 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
1834 | __releases(&current->sighand->siglock)
1835 | __acquires(&current->sighand->siglock)
1836 | { |
1837 | bool gstop_done = false; |
1838 | |
1839 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1840 | /* |
1841 | * The arch code has something special to do before a |
1842 | * ptrace stop. This is allowed to block, e.g. for faults |
1843 | * on user stack pages. We can't keep the siglock while |
1844 | * calling arch_ptrace_stop, so we must release it now. |
1845 | * To preserve proper semantics, we must do this before |
1846 | * any signal bookkeeping like checking group_stop_count. |
1847 | * Meanwhile, a SIGKILL could come in before we retake the |
1848 | * siglock. That must prevent us from sleeping in TASK_TRACED. |
1849 | * So after regaining the lock, we must check for SIGKILL. |
1850 | */ |
1851 | spin_unlock_irq(&current->sighand->siglock);
1852 | arch_ptrace_stop(exit_code, info); |
1853 | spin_lock_irq(&current->sighand->siglock);
1854 | if (sigkill_pending(current)) |
1855 | return; |
1856 | } |
1857 | |
1858 | /* |
1859 | * We're committing to trapping. TRACED should be visible before |
1860 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). |
1861 | * Also, transition to TRACED and updates to ->jobctl should be |
1862 | * atomic with respect to siglock and should be done after the arch |
1863 | * hook as siglock is released and regrabbed across it. |
1864 | */ |
1865 | set_current_state(TASK_TRACED); |
1866 | |
1867 | current->last_siginfo = info; |
1868 | current->exit_code = exit_code; |
1869 | |
1870 | /* |
1871 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
1872 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1873 | * across siglock relocks since INTERRUPT was scheduled, PENDING |
1874 | * could be clear now. We act as if SIGCONT is received after |
1875 | * TASK_TRACED is entered - ignore it. |
1876 | */ |
1877 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
1878 | gstop_done = task_participate_group_stop(current); |
1879 | |
1880 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
1881 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
1882 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) |
1883 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); |
1884 | |
1885 | /* entering a trap, clear TRAPPING */ |
1886 | task_clear_jobctl_trapping(current); |
1887 | |
1888 | spin_unlock_irq(&current->sighand->siglock);
1889 | read_lock(&tasklist_lock); |
1890 | if (may_ptrace_stop()) { |
1891 | /* |
1892 | * Notify parents of the stop. |
1893 | * |
1894 | * While ptraced, there are two parents - the ptracer and |
1895 | * the real_parent of the group_leader. The ptracer should |
1896 | * know about every stop while the real parent is only |
1897 | * interested in the completion of group stop. The states |
1898 | * for the two don't interact with each other. Notify |
1899 | * separately unless they're gonna be duplicates. |
1900 | */ |
1901 | do_notify_parent_cldstop(current, true, why); |
1902 | if (gstop_done && ptrace_reparented(current)) |
1903 | do_notify_parent_cldstop(current, false, why); |
1904 | |
1905 | /* |
1906 | * Don't want to allow preemption here, because |
1907 | * sys_ptrace() needs this task to be inactive. |
1908 | * |
1909 | * XXX: implement read_unlock_no_resched(). |
1910 | */ |
1911 | preempt_disable(); |
1912 | read_unlock(&tasklist_lock); |
1913 | preempt_enable_no_resched(); |
1914 | freezable_schedule(); |
1915 | } else { |
1916 | /* |
1917 | * By the time we got the lock, our tracer went away. |
1918 | * Don't drop the lock yet, another tracer may come. |
1919 | * |
1920 | * If @gstop_done, the ptracer went away between group stop |
1921 | * completion and here. During detach, it would have set |
1922 | * JOBCTL_STOP_PENDING on us and we'll re-enter |
1923 | * TASK_STOPPED in do_signal_stop() on return, so notifying |
1924 | * the real parent of the group stop completion is enough. |
1925 | */ |
1926 | if (gstop_done) |
1927 | do_notify_parent_cldstop(current, false, why); |
1928 | |
1929 | /* tasklist protects us from ptrace_freeze_traced() */ |
1930 | __set_current_state(TASK_RUNNING); |
1931 | if (clear_code) |
1932 | current->exit_code = 0; |
1933 | read_unlock(&tasklist_lock); |
1934 | } |
1935 | |
1936 | /* |
1937 | * We are back. Now reacquire the siglock before touching |
1938 | * last_siginfo, so that we are sure to have synchronized with |
1939 | * any signal-sending on another CPU that wants to examine it. |
1940 | */ |
1941 | spin_lock_irq(&current->sighand->siglock);
1942 | current->last_siginfo = NULL; |
1943 | |
1944 | /* LISTENING can be set only during STOP traps, clear it */ |
1945 | current->jobctl &= ~JOBCTL_LISTENING; |
1946 | |
1947 | /* |
1948 | * Queued signals ignored us while we were stopped for tracing. |
1949 | * So check for any that we should take before resuming user mode. |
1950 | * This sets TIF_SIGPENDING, but never clears it. |
1951 | */ |
1952 | recalc_sigpending_tsk(current); |
1953 | } |
1954 | |
1955 | static void ptrace_do_notify(int signr, int exit_code, int why) |
1956 | { |
1957 | siginfo_t info; |
1958 | |
1959 | memset(&info, 0, sizeof info); |
1960 | info.si_signo = signr; |
1961 | info.si_code = exit_code; |
1962 | info.si_pid = task_pid_vnr(current); |
1963 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1964 | |
1965 | /* Let the debugger run. */ |
1966 | ptrace_stop(exit_code, why, 1, &info); |
1967 | } |
1968 | |
1969 | void ptrace_notify(int exit_code) |
1970 | { |
1971 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
1972 | if (unlikely(current->task_works)) |
1973 | task_work_run(); |
1974 | |
1975 | spin_lock_irq(&current->sighand->siglock);
1976 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1977 | spin_unlock_irq(&current->sighand->siglock);
1978 | } |
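
/*
 * Tracer-side sketch (user space, not kernel code) of how the stop
 * reported by ptrace_notify() is observed. Assumptions: @child is an
 * already-attached tracee; glibc wrappers; error handling elided.
 * handle_exit_event() is a hypothetical callback.
 *
 *	#include <signal.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	void trace_exit_event(pid_t child)
 *	{
 *		int status;
 *
 *		ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACEEXIT);
 *		ptrace(PTRACE_CONT, child, 0, 0);
 *		waitpid(child, &status, 0);
 *		if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXIT << 8)))
 *			handle_exit_event(child);
 *		ptrace(PTRACE_CONT, child, 0, 0);
 *	}
 */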
1979 | |
1980 | /** |
1981 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals |
1982 | * @signr: signr causing group stop if initiating |
1983 | * |
1984 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr |
1985 | * and participate in it. If already set, participate in the existing |
1986 | * group stop. If participated in a group stop (and thus slept), %true is |
1987 | * returned with siglock released. |
1988 | * |
1989 | * If ptraced, this function doesn't handle stop itself. Instead, |
1990 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock |
1991 | * untouched. The caller must ensure that INTERRUPT trap handling takes |
1992 | * place afterwards.
1993 | * |
1994 | * CONTEXT: |
1995 | * Must be called with @current->sighand->siglock held, which is released |
1996 | * on %true return. |
1997 | * |
1998 | * RETURNS: |
1999 | * %false if group stop is already cancelled or ptrace trap is scheduled. |
2000 | * %true if participated in group stop. |
2001 | */ |
2002 | static bool do_signal_stop(int signr) |
2003 | __releases(&current->sighand->siglock)
2004 | { |
2005 | struct signal_struct *sig = current->signal; |
2006 | |
2007 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { |
2008 | unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
2009 | struct task_struct *t; |
2010 | |
2011 | /* signr will be recorded in task->jobctl for retries */ |
2012 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); |
2013 | |
2014 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
2015 | unlikely(signal_group_exit(sig))) |
2016 | return false; |
2017 | /* |
2018 | * There is no group stop already in progress. We must |
2019 | * initiate one now. |
2020 | * |
2021 | * While ptraced, a task may be resumed while group stop is |
2022 | * still in effect and then receive a stop signal and |
2023 | * initiate another group stop. This deviates from the |
2024 | * usual behavior as two consecutive stop signals can't |
2025 | * cause two group stops when !ptraced. That is why we |
2026 | * also check !task_is_stopped(t) below. |
2027 | * |
2028 | * The condition can be distinguished by testing whether |
2029 | * SIGNAL_STOP_STOPPED is already set. Don't generate |
2030 | * group_exit_code in such a case.
2031 | * |
2032 | * This is not necessary for SIGNAL_STOP_CONTINUED because |
2033 | * an intervening stop signal is required to cause two |
2034 | * continued events regardless of ptrace. |
2035 | */ |
2036 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
2037 | sig->group_exit_code = signr; |
2038 | |
2039 | sig->group_stop_count = 0; |
2040 | |
2041 | if (task_set_jobctl_pending(current, signr | gstop)) |
2042 | sig->group_stop_count++; |
2043 | |
2044 | t = current; |
2045 | while_each_thread(current, t) { |
2046 | /* |
2047 | * Setting state to TASK_STOPPED for a group |
2048 | * stop is always done with the siglock held, |
2049 | * so this check has no races. |
2050 | */ |
2051 | if (!task_is_stopped(t) && |
2052 | task_set_jobctl_pending(t, signr | gstop)) { |
2053 | sig->group_stop_count++; |
2054 | if (likely(!(t->ptrace & PT_SEIZED))) |
2055 | signal_wake_up(t, 0); |
2056 | else |
2057 | ptrace_trap_notify(t); |
2058 | } |
2059 | } |
2060 | } |
2061 | |
2062 | if (likely(!current->ptrace)) { |
2063 | int notify = 0; |
2064 | |
2065 | /* |
2066 | * If there are no other threads in the group, or if there |
2067 | * is a group stop in progress and we are the last to stop, |
2068 | * report to the parent. |
2069 | */ |
2070 | if (task_participate_group_stop(current)) |
2071 | notify = CLD_STOPPED; |
2072 | |
2073 | __set_current_state(TASK_STOPPED); |
2074 | spin_unlock_irq(&current->sighand->siglock);
2075 | |
2076 | /* |
2077 | * Notify the parent of the group stop completion. Because |
2078 | * we're not holding either the siglock or tasklist_lock |
2079 | * here, a ptracer may attach in between; however, this is for
2080 | * group stop and should always be delivered to the real |
2081 | * parent of the group leader. The new ptracer will get |
2082 | * its notification when this task transitions into |
2083 | * TASK_TRACED. |
2084 | */ |
2085 | if (notify) { |
2086 | read_lock(&tasklist_lock); |
2087 | do_notify_parent_cldstop(current, false, notify); |
2088 | read_unlock(&tasklist_lock); |
2089 | } |
2090 | |
2091 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ |
2092 | freezable_schedule(); |
2093 | return true; |
2094 | } else { |
2095 | /* |
2096 | * While ptraced, group stop is handled by STOP trap. |
2097 | * Schedule it and let the caller deal with it. |
2098 | */ |
2099 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); |
2100 | return false; |
2101 | } |
2102 | } |
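
/*
 * User-space sketch (not kernel code) of the group-stop behaviour
 * implemented above: SIGSTOP stops every thread in the group and the
 * parent gets one WUNTRACED notification. Assumes @child is a direct
 * child; error handling elided.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	void stop_and_resume(pid_t child)
 *	{
 *		int status;
 *
 *		kill(child, SIGSTOP);
 *		waitpid(child, &status, WUNTRACED);
 *		if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
 *			kill(child, SIGCONT);
 *	}
 */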
2103 | |
2104 | /** |
2105 | * do_jobctl_trap - take care of ptrace jobctl traps |
2106 | * |
2107 | * When PT_SEIZED, it's used for both group stop and explicit |
2108 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with |
2109 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain |
2110 | * the stop signal; otherwise, %SIGTRAP. |
2111 | * |
2112 | * When !PT_SEIZED, it's used only for group stop trap with stop signal |
2113 | * number as exit_code and no siginfo. |
2114 | * |
2115 | * CONTEXT: |
2116 | * Must be called with @current->sighand->siglock held, which may be |
2117 | * released and re-acquired before returning with intervening sleep. |
2118 | */ |
2119 | static void do_jobctl_trap(void) |
2120 | { |
2121 | struct signal_struct *signal = current->signal; |
2122 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; |
2123 | |
2124 | if (current->ptrace & PT_SEIZED) { |
2125 | if (!signal->group_stop_count && |
2126 | !(signal->flags & SIGNAL_STOP_STOPPED)) |
2127 | signr = SIGTRAP; |
2128 | WARN_ON_ONCE(!signr); |
2129 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), |
2130 | CLD_STOPPED); |
2131 | } else { |
2132 | WARN_ON_ONCE(!signr); |
2133 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); |
2134 | current->exit_code = 0; |
2135 | } |
2136 | } |
2137 | |
2138 | static int ptrace_signal(int signr, siginfo_t *info) |
2139 | { |
2140 | ptrace_signal_deliver(); |
2141 | /* |
2142 | * We do not check sig_kernel_stop(signr) but set this marker |
2143 | * unconditionally because we do not know whether debugger will |
2144 | * change signr. This flag has no meaning unless we are going |
2145 | * to stop after return from ptrace_stop(). In this case it will |
2146 | * be checked in do_signal_stop(), we should only stop if it was |
2147 | * not cleared by SIGCONT while we were sleeping. See also the |
2148 | * comment in dequeue_signal(). |
2149 | */ |
2150 | current->jobctl |= JOBCTL_STOP_DEQUEUED; |
2151 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
2152 | |
2153 | /* We're back. Did the debugger cancel the sig? */ |
2154 | signr = current->exit_code; |
2155 | if (signr == 0) |
2156 | return signr; |
2157 | |
2158 | current->exit_code = 0; |
2159 | |
2160 | /* |
2161 | * Update the siginfo structure if the signal has |
2162 | * changed. If the debugger wanted something |
2163 | * specific in the siginfo structure then it should |
2164 | * have updated *info via PTRACE_SETSIGINFO. |
2165 | */ |
2166 | if (signr != info->si_signo) { |
2167 | info->si_signo = signr; |
2168 | info->si_errno = 0; |
2169 | info->si_code = SI_USER; |
2170 | rcu_read_lock(); |
2171 | info->si_pid = task_pid_vnr(current->parent); |
2172 | info->si_uid = from_kuid_munged(current_user_ns(), |
2173 | task_uid(current->parent)); |
2174 | rcu_read_unlock(); |
2175 | } |
2176 | |
2177 | /* If the (new) signal is now blocked, requeue it. */ |
2178 | if (sigismember(&current->blocked, signr)) {
2179 | specific_send_sig_info(signr, info, current); |
2180 | signr = 0; |
2181 | } |
2182 | |
2183 | return signr; |
2184 | } |
2185 | |
2186 | int get_signal(struct ksignal *ksig) |
2187 | { |
2188 | struct sighand_struct *sighand = current->sighand; |
2189 | struct signal_struct *signal = current->signal; |
2190 | int signr; |
2191 | |
2192 | if (unlikely(current->task_works)) |
2193 | task_work_run(); |
2194 | |
2195 | if (unlikely(uprobe_deny_signal())) |
2196 | return 0; |
2197 | |
2198 | /* |
2199 | * Do this once, we can't return to user-mode if freezing() == T. |
2200 | * do_signal_stop() and ptrace_stop() do freezable_schedule() and |
2201 | * thus do not need another check after return. |
2202 | */ |
2203 | try_to_freeze(); |
2204 | |
2205 | relock: |
2206 | spin_lock_irq(&sighand->siglock); |
2207 | /* |
2208 | * Every stopped thread goes here after wakeup. Check to see if |
2209 | * we should notify the parent, prepare_signal(SIGCONT) encodes |
2210 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
2211 | */ |
2212 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
2213 | int why; |
2214 | |
2215 | if (signal->flags & SIGNAL_CLD_CONTINUED) |
2216 | why = CLD_CONTINUED; |
2217 | else |
2218 | why = CLD_STOPPED; |
2219 | |
2220 | signal->flags &= ~SIGNAL_CLD_MASK; |
2221 | |
2222 | spin_unlock_irq(&sighand->siglock); |
2223 | |
2224 | /* |
2225 | * Notify the parent that we're continuing. This event is |
2226 | * always per-process and doesn't make a whole lot of sense
2227 | * for ptracers, who shouldn't consume the state via |
2228 | * wait(2) either, but, for backward compatibility, notify |
2229 | * the ptracer of the group leader too unless it's gonna be |
2230 | * a duplicate. |
2231 | */ |
2232 | read_lock(&tasklist_lock); |
2233 | do_notify_parent_cldstop(current, false, why); |
2234 | |
2235 | if (ptrace_reparented(current->group_leader)) |
2236 | do_notify_parent_cldstop(current->group_leader, |
2237 | true, why); |
2238 | read_unlock(&tasklist_lock); |
2239 | |
2240 | goto relock; |
2241 | } |
2242 | |
2243 | /* Has this task already been marked for death? */ |
2244 | if (signal_group_exit(signal)) { |
2245 | ksig->info.si_signo = signr = SIGKILL; |
2246 | sigdelset(&current->pending.signal, SIGKILL);
2247 | recalc_sigpending(); |
2248 | goto fatal; |
2249 | } |
2250 | |
2251 | for (;;) { |
2252 | struct k_sigaction *ka; |
2253 | |
2254 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && |
2255 | do_signal_stop(0)) |
2256 | goto relock; |
2257 | |
2258 | if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { |
2259 | do_jobctl_trap(); |
2260 | spin_unlock_irq(&sighand->siglock); |
2261 | goto relock; |
2262 | } |
2263 | |
2264 | /* |
2265 | * Signals generated by the execution of an instruction |
2266 | * need to be delivered before any other pending signals |
2267 | * so that the instruction pointer in the signal stack |
2268 | * frame points to the faulting instruction. |
2269 | */ |
2270 | signr = dequeue_synchronous_signal(&ksig->info); |
2271 | if (!signr) |
2272 | signr = dequeue_signal(current, &current->blocked, &ksig->info);
2273 | |
2274 | if (!signr) |
2275 | break; /* will return 0 */ |
2276 | |
2277 | if (unlikely(current->ptrace) && signr != SIGKILL) { |
2278 | signr = ptrace_signal(signr, &ksig->info); |
2279 | if (!signr) |
2280 | continue; |
2281 | } |
2282 | |
2283 | ka = &sighand->action[signr-1]; |
2284 | |
2285 | /* Trace actually delivered signals. */ |
2286 | trace_signal_deliver(signr, &ksig->info, ka); |
2287 | |
2288 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
2289 | continue; |
2290 | if (ka->sa.sa_handler != SIG_DFL) { |
2291 | /* Run the handler. */ |
2292 | ksig->ka = *ka; |
2293 | |
2294 | if (ka->sa.sa_flags & SA_ONESHOT) |
2295 | ka->sa.sa_handler = SIG_DFL; |
2296 | |
2297 | break; /* will return non-zero "signr" value */ |
2298 | } |
2299 | |
2300 | /* |
2301 | * Now we are doing the default action for this signal. |
2302 | */ |
2303 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ |
2304 | continue; |
2305 | |
2306 | /* |
2307 | * Global init gets no signals it doesn't want. |
2308 | * Container-init gets no signals it doesn't want from same |
2309 | * container. |
2310 | * |
2311 | * Note that if global/container-init sees a sig_kernel_only() |
2312 | * signal here, the signal must have been generated internally |
2313 | * or must have come from an ancestor namespace. In either |
2314 | * case, the signal cannot be dropped. |
2315 | */ |
2316 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
2317 | !sig_kernel_only(signr)) |
2318 | continue; |
2319 | |
2320 | if (sig_kernel_stop(signr)) { |
2321 | /* |
2322 | * The default action is to stop all threads in |
2323 | * the thread group. The job control signals |
2324 | * do nothing in an orphaned pgrp, but SIGSTOP |
2325 | * always works. Note that siglock needs to be |
2326 | * dropped during the call to is_orphaned_pgrp() |
2327 | * because of lock ordering with tasklist_lock. |
2328 | * This allows an intervening SIGCONT to be posted. |
2329 | * We need to check for that and bail out if necessary. |
2330 | */ |
2331 | if (signr != SIGSTOP) { |
2332 | spin_unlock_irq(&sighand->siglock); |
2333 | |
2334 | /* signals can be posted during this window */ |
2335 | |
2336 | if (is_current_pgrp_orphaned()) |
2337 | goto relock; |
2338 | |
2339 | spin_lock_irq(&sighand->siglock); |
2340 | } |
2341 | |
2342 | if (likely(do_signal_stop(ksig->info.si_signo))) { |
2343 | /* It released the siglock. */ |
2344 | goto relock; |
2345 | } |
2346 | |
2347 | /* |
2348 | * We didn't actually stop, due to a race |
2349 | * with SIGCONT or something like that. |
2350 | */ |
2351 | continue; |
2352 | } |
2353 | |
2354 | fatal: |
2355 | spin_unlock_irq(&sighand->siglock); |
2356 | |
2357 | /* |
2358 | * Anything else is fatal, maybe with a core dump. |
2359 | */ |
2360 | current->flags |= PF_SIGNALED; |
2361 | |
2362 | if (sig_kernel_coredump(signr)) { |
2363 | if (print_fatal_signals) |
2364 | print_fatal_signal(ksig->info.si_signo); |
2365 | proc_coredump_connector(current); |
2366 | /* |
2367 | * If it was able to dump core, this kills all |
2368 | * other threads in the group and synchronizes with |
2369 | * their demise. If we lost the race with another |
2370 | * thread getting here, it set group_exit_code |
2371 | * first and our do_group_exit call below will use |
2372 | * that value and ignore the one we pass it. |
2373 | */ |
2374 | do_coredump(&ksig->info); |
2375 | } |
2376 | |
2377 | /* |
2378 | * Death signals, no core dump. |
2379 | */ |
2380 | do_group_exit(ksig->info.si_signo); |
2381 | /* NOTREACHED */ |
2382 | } |
2383 | spin_unlock_irq(&sighand->siglock); |
2384 | |
2385 | ksig->sig = signr; |
2386 | return ksig->sig > 0; |
2387 | } |
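
/*
 * Arch-side sketch (illustrative only, loosely modeled on existing
 * ports) of how get_signal() and signal_setup_done() are typically
 * driven from an architecture's signal-delivery path. setup_rt_frame()
 * is a placeholder for the arch-specific frame setup; syscall restart
 * handling is omitted.
 *
 *	static void arch_do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			signal_setup_done(setup_rt_frame(&ksig, regs),
 *					  &ksig, 0);
 *			return;
 *		}
 *		restore_saved_sigmask();
 *	}
 */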
2388 | |
2389 | /** |
2390 | * signal_delivered - called after a signal has been delivered
2391 | * @ksig: kernel signal struct |
2392 | * @stepping: nonzero if debugger single-step or block-step in use |
2393 | * |
2394 | * This function should be called when a signal has successfully been |
2395 | * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask |
2396 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
2397 | * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2398 | */ |
2399 | static void signal_delivered(struct ksignal *ksig, int stepping) |
2400 | { |
2401 | sigset_t blocked; |
2402 | |
2403 | /* A signal was successfully delivered, and the saved sigmask was
2404 | * stored on the signal frame and will be restored by sigreturn.
2405 | * So we can simply clear the restore sigmask flag.
2406 | */
2407 | clear_restore_sigmask(); |
2408 | |
2409 | sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2410 | if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) |
2411 | sigaddset(&blocked, ksig->sig); |
2412 | set_current_blocked(&blocked); |
2413 | tracehook_signal_handler(stepping); |
2414 | } |
2415 | |
2416 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) |
2417 | { |
2418 | if (failed) |
2419 | force_sigsegv(ksig->sig, current); |
2420 | else |
2421 | signal_delivered(ksig, stepping); |
2422 | } |
2423 | |
2424 | /* |
2425 | * It could be that complete_signal() picked us to notify about the |
2426 | * group-wide signal. Other threads should be notified now to take |
2427 | * the shared signals in @which since we will not. |
2428 | */ |
2429 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) |
2430 | { |
2431 | sigset_t retarget; |
2432 | struct task_struct *t; |
2433 | |
2434 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); |
2435 | if (sigisemptyset(&retarget)) |
2436 | return; |
2437 | |
2438 | t = tsk; |
2439 | while_each_thread(tsk, t) { |
2440 | if (t->flags & PF_EXITING) |
2441 | continue; |
2442 | |
2443 | if (!has_pending_signals(&retarget, &t->blocked)) |
2444 | continue; |
2445 | /* Remove the signals this thread can handle. */ |
2446 | sigandsets(&retarget, &retarget, &t->blocked); |
2447 | |
2448 | if (!signal_pending(t)) |
2449 | signal_wake_up(t, 0); |
2450 | |
2451 | if (sigisemptyset(&retarget)) |
2452 | break; |
2453 | } |
2454 | } |
2455 | |
2456 | void exit_signals(struct task_struct *tsk) |
2457 | { |
2458 | int group_stop = 0; |
2459 | sigset_t unblocked; |
2460 | |
2461 | /* |
2462 | * @tsk is about to have PF_EXITING set - lock out users which |
2463 | * expect stable threadgroup. |
2464 | */ |
2465 | threadgroup_change_begin(tsk); |
2466 | |
2467 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2468 | tsk->flags |= PF_EXITING; |
2469 | threadgroup_change_end(tsk); |
2470 | return; |
2471 | } |
2472 | |
2473 | spin_lock_irq(&tsk->sighand->siglock); |
2474 | /* |
2475 | * From now this task is not visible for group-wide signals, |
2476 | * see wants_signal(), do_signal_stop(). |
2477 | */ |
2478 | tsk->flags |= PF_EXITING; |
2479 | |
2480 | threadgroup_change_end(tsk); |
2481 | |
2482 | if (!signal_pending(tsk)) |
2483 | goto out; |
2484 | |
2485 | unblocked = tsk->blocked; |
2486 | signotset(&unblocked); |
2487 | retarget_shared_pending(tsk, &unblocked); |
2488 | |
2489 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && |
2490 | task_participate_group_stop(tsk)) |
2491 | group_stop = CLD_STOPPED; |
2492 | out: |
2493 | spin_unlock_irq(&tsk->sighand->siglock); |
2494 | |
2495 | /* |
2496 | * If group stop has completed, deliver the notification. This |
2497 | * should always go to the real parent of the group leader. |
2498 | */ |
2499 | if (unlikely(group_stop)) { |
2500 | read_lock(&tasklist_lock); |
2501 | do_notify_parent_cldstop(tsk, false, group_stop); |
2502 | read_unlock(&tasklist_lock); |
2503 | } |
2504 | } |
2505 | |
2506 | EXPORT_SYMBOL(recalc_sigpending); |
2507 | EXPORT_SYMBOL_GPL(dequeue_signal); |
2508 | EXPORT_SYMBOL(flush_signals); |
2509 | EXPORT_SYMBOL(force_sig); |
2510 | EXPORT_SYMBOL(send_sig); |
2511 | EXPORT_SYMBOL(send_sig_info); |
2512 | EXPORT_SYMBOL(sigprocmask); |
2513 | |
2514 | /* |
2515 | * System call entry points. |
2516 | */ |
2517 | |
2518 | /** |
2519 | * sys_restart_syscall - restart a system call |
2520 | */ |
2521 | SYSCALL_DEFINE0(restart_syscall) |
2522 | { |
2523 | struct restart_block *restart = &current->restart_block;
2524 | return restart->fn(restart); |
2525 | } |
2526 | |
2527 | long do_no_restart_syscall(struct restart_block *param) |
2528 | { |
2529 | return -EINTR; |
2530 | } |
2531 | |
2532 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
2533 | { |
2534 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { |
2535 | sigset_t newblocked; |
2536 | /* A set of now blocked but previously unblocked signals. */ |
2537 | sigandnsets(&newblocked, newset, &current->blocked);
2538 | retarget_shared_pending(tsk, &newblocked); |
2539 | } |
2540 | tsk->blocked = *newset; |
2541 | recalc_sigpending(); |
2542 | } |
2543 | |
2544 | /** |
2545 | * set_current_blocked - change current->blocked mask |
2546 | * @newset: new mask |
2547 | * |
2548 | * It is wrong to change ->blocked directly; this helper should be used
2549 | * to ensure the process can't miss a shared signal we are going to block. |
2550 | */ |
2551 | void set_current_blocked(sigset_t *newset) |
2552 | { |
2553 | sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2554 | __set_current_blocked(newset); |
2555 | } |
2556 | |
2557 | void __set_current_blocked(const sigset_t *newset) |
2558 | { |
2559 | struct task_struct *tsk = current; |
2560 | |
2561 | /* |
2562 | * If the signal mask hasn't changed, there is nothing we need
2563 | * to do; current->blocked shouldn't be modified by another task.
2564 | */ |
2565 | if (sigequalsets(&tsk->blocked, newset)) |
2566 | return; |
2567 | |
2568 | spin_lock_irq(&tsk->sighand->siglock); |
2569 | __set_task_blocked(tsk, newset); |
2570 | spin_unlock_irq(&tsk->sighand->siglock); |
2571 | } |
2572 | |
2573 | /* |
2574 | * This is also useful for kernel threads that want to temporarily |
2575 | * (or permanently) block certain signals. |
2576 | * |
2577 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel |
2578 | * interface happily blocks "unblockable" signals like SIGKILL |
2579 | * and friends. |
2580 | */ |
2581 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2582 | { |
2583 | struct task_struct *tsk = current; |
2584 | sigset_t newset; |
2585 | |
2586 | /* Lockless, only current can change ->blocked, never from irq */ |
2587 | if (oldset) |
2588 | *oldset = tsk->blocked; |
2589 | |
2590 | switch (how) { |
2591 | case SIG_BLOCK: |
2592 | sigorsets(&newset, &tsk->blocked, set); |
2593 | break; |
2594 | case SIG_UNBLOCK: |
2595 | sigandnsets(&newset, &tsk->blocked, set); |
2596 | break; |
2597 | case SIG_SETMASK: |
2598 | newset = *set; |
2599 | break; |
2600 | default: |
2601 | return -EINVAL; |
2602 | } |
2603 | |
2604 | __set_current_blocked(&newset); |
2605 | return 0; |
2606 | } |
2607 | |
2608 | /** |
2609 | * sys_rt_sigprocmask - change the list of currently blocked signals |
2610 | * @how: whether to add, remove, or set signals |
2611 | * @nset: signals to add, remove, or set (if non-null)
2612 | * @oset: previous value of signal mask if non-null |
2613 | * @sigsetsize: size of sigset_t type |
2614 | */ |
2615 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
2616 | sigset_t __user *, oset, size_t, sigsetsize) |
2617 | { |
2618 | sigset_t old_set, new_set; |
2619 | int error; |
2620 | |
2621 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2622 | if (sigsetsize != sizeof(sigset_t)) |
2623 | return -EINVAL; |
2624 | |
2625 | old_set = current->blocked; |
2626 | |
2627 | if (nset) { |
2628 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) |
2629 | return -EFAULT; |
2630 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2631 | |
2632 | error = sigprocmask(how, &new_set, NULL); |
2633 | if (error) |
2634 | return error; |
2635 | } |
2636 | |
2637 | if (oset) { |
2638 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) |
2639 | return -EFAULT; |
2640 | } |
2641 | |
2642 | return 0; |
2643 | } |
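
/*
 * User-space sketch (not kernel code) of reaching the syscall above
 * through the glibc sigprocmask() wrapper: block SIGINT around a
 * critical region. Error handling elided; multithreaded programs
 * should use pthread_sigmask() instead.
 *
 *	#include <signal.h>
 *
 *	void with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);
 *		fn();
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */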
2644 | |
2645 | #ifdef CONFIG_COMPAT |
2646 | COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, |
2647 | compat_sigset_t __user *, oset, compat_size_t, sigsetsize) |
2648 | { |
2649 | #ifdef __BIG_ENDIAN |
2650 | sigset_t old_set = current->blocked; |
2651 | |
2652 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2653 | if (sigsetsize != sizeof(sigset_t)) |
2654 | return -EINVAL; |
2655 | |
2656 | if (nset) { |
2657 | compat_sigset_t new32; |
2658 | sigset_t new_set; |
2659 | int error; |
2660 | if (copy_from_user(&new32, nset, sizeof(compat_sigset_t))) |
2661 | return -EFAULT; |
2662 | |
2663 | sigset_from_compat(&new_set, &new32); |
2664 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2665 | |
2666 | error = sigprocmask(how, &new_set, NULL); |
2667 | if (error) |
2668 | return error; |
2669 | } |
2670 | if (oset) { |
2671 | compat_sigset_t old32; |
2672 | sigset_to_compat(&old32, &old_set); |
2673 | if (copy_to_user(oset, &old32, sizeof(compat_sigset_t))) |
2674 | return -EFAULT; |
2675 | } |
2676 | return 0; |
2677 | #else |
2678 | return sys_rt_sigprocmask(how, (sigset_t __user *)nset, |
2679 | (sigset_t __user *)oset, sigsetsize); |
2680 | #endif |
2681 | } |
2682 | #endif |
2683 | |
2684 | static int do_sigpending(void *set, unsigned long sigsetsize) |
2685 | { |
2686 | if (sigsetsize > sizeof(sigset_t)) |
2687 | return -EINVAL; |
2688 | |
2689 | spin_lock_irq(&current->sighand->siglock);
2690 | sigorsets(set, &current->pending.signal,
2691 | &current->signal->shared_pending.signal);
2692 | spin_unlock_irq(&current->sighand->siglock);
2693 | |
2694 | /* Outside the lock because only this thread touches it. */ |
2695 | sigandsets(set, &current->blocked, set);
2696 | return 0; |
2697 | } |
2698 | |
2699 | /** |
2700 | * sys_rt_sigpending - examine pending signals that have been raised
2701 | * while blocked
2702 | * @uset: stores pending signals |
2703 | * @sigsetsize: size of sigset_t type or smaller
2704 | */ |
2705 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
2706 | { |
2707 | sigset_t set; |
2708 | int err = do_sigpending(&set, sigsetsize); |
2709 | if (!err && copy_to_user(uset, &set, sigsetsize)) |
2710 | err = -EFAULT; |
2711 | return err; |
2712 | } |
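
/*
 * User-space sketch (not kernel code): raise a blocked signal and
 * observe it through sigpending(), which ends up in the syscall
 * above. Error handling elided.
 *
 *	#include <signal.h>
 *
 *	int sigusr1_is_pending(void)
 *	{
 *		sigset_t set, pending;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);
 *		sigpending(&pending);
 *		return sigismember(&pending, SIGUSR1);
 *	}
 */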
2713 | |
2714 | #ifdef CONFIG_COMPAT |
2715 | COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, |
2716 | compat_size_t, sigsetsize) |
2717 | { |
2718 | #ifdef __BIG_ENDIAN |
2719 | sigset_t set; |
2720 | int err = do_sigpending(&set, sigsetsize); |
2721 | if (!err) { |
2722 | compat_sigset_t set32; |
2723 | sigset_to_compat(&set32, &set); |
2724 | /* we can get here only if sigsetsize <= sizeof(set) */ |
2725 | if (copy_to_user(uset, &set32, sigsetsize)) |
2726 | err = -EFAULT; |
2727 | } |
2728 | return err; |
2729 | #else |
2730 | return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize); |
2731 | #endif |
2732 | } |
2733 | #endif |
2734 | |
2735 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER |
2736 | |
2737 | int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) |
2738 | { |
2739 | int err; |
2740 | |
2741 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) |
2742 | return -EFAULT; |
2743 | if (from->si_code < 0) |
2744 | return __copy_to_user(to, from, sizeof(siginfo_t)) |
2745 | ? -EFAULT : 0; |
2746 | /* |
2747 | * If you change siginfo_t structure, please be sure |
2748 | * this code is fixed accordingly. |
2749 | * Please remember to update the signalfd_copyinfo() function |
2750 | * inside fs/signalfd.c too, in case siginfo_t changes. |
2751 | * It should never copy any pad contained in the structure |
2752 | * to avoid security leaks, but must copy the generic |
2753 | * 3 ints plus the relevant union member. |
2754 | */ |
2755 | err = __put_user(from->si_signo, &to->si_signo); |
2756 | err |= __put_user(from->si_errno, &to->si_errno); |
2757 | err |= __put_user((short)from->si_code, &to->si_code); |
2758 | switch (from->si_code & __SI_MASK) { |
2759 | case __SI_KILL: |
2760 | err |= __put_user(from->si_pid, &to->si_pid); |
2761 | err |= __put_user(from->si_uid, &to->si_uid); |
2762 | break; |
2763 | case __SI_TIMER: |
2764 | err |= __put_user(from->si_tid, &to->si_tid); |
2765 | err |= __put_user(from->si_overrun, &to->si_overrun); |
2766 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2767 | break; |
2768 | case __SI_POLL: |
2769 | err |= __put_user(from->si_band, &to->si_band); |
2770 | err |= __put_user(from->si_fd, &to->si_fd); |
2771 | break; |
2772 | case __SI_FAULT: |
2773 | err |= __put_user(from->si_addr, &to->si_addr); |
2774 | #ifdef __ARCH_SI_TRAPNO |
2775 | err |= __put_user(from->si_trapno, &to->si_trapno); |
2776 | #endif |
2777 | #ifdef BUS_MCEERR_AO |
2778 | /* |
2779 | * Other callers might not initialize the si_lsb field, |
2780 | * so check explicitly for the right codes here. |
2781 | */ |
2782 | if (from->si_signo == SIGBUS && |
2783 | (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) |
2784 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); |
2785 | #endif |
2786 | #ifdef SEGV_BNDERR |
2787 | if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) { |
2788 | err |= __put_user(from->si_lower, &to->si_lower); |
2789 | err |= __put_user(from->si_upper, &to->si_upper); |
2790 | } |
2791 | #endif |
2792 | #ifdef SEGV_PKUERR |
2793 | if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR) |
2794 | err |= __put_user(from->si_pkey, &to->si_pkey); |
2795 | #endif |
2796 | break; |
2797 | case __SI_CHLD: |
2798 | err |= __put_user(from->si_pid, &to->si_pid); |
2799 | err |= __put_user(from->si_uid, &to->si_uid); |
2800 | err |= __put_user(from->si_status, &to->si_status); |
2801 | err |= __put_user(from->si_utime, &to->si_utime); |
2802 | err |= __put_user(from->si_stime, &to->si_stime); |
2803 | break; |
2804 | case __SI_RT: /* This is not generated by the kernel as of now. */ |
2805 | case __SI_MESGQ: /* But this is */ |
2806 | err |= __put_user(from->si_pid, &to->si_pid); |
2807 | err |= __put_user(from->si_uid, &to->si_uid); |
2808 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2809 | break; |
2810 | #ifdef __ARCH_SIGSYS |
2811 | case __SI_SYS: |
2812 | err |= __put_user(from->si_call_addr, &to->si_call_addr); |
2813 | err |= __put_user(from->si_syscall, &to->si_syscall); |
2814 | err |= __put_user(from->si_arch, &to->si_arch); |
2815 | break; |
2816 | #endif |
2817 | default: /* this is just in case for now ... */ |
2818 | err |= __put_user(from->si_pid, &to->si_pid); |
2819 | err |= __put_user(from->si_uid, &to->si_uid); |
2820 | break; |
2821 | } |
2822 | return err; |
2823 | } |
2824 | |
2825 | #endif |
2826 | |
2827 | /** |
2828 | * do_sigtimedwait - wait for queued signals specified in @which |
2829 | * @which: queued signals to wait for |
2830 | * @info: if non-null, the signal's siginfo is returned here |
2831 | * @ts: upper bound on process time suspension |
2832 | */ |
2833 | int do_sigtimedwait(const sigset_t *which, siginfo_t *info, |
2834 | const struct timespec *ts) |
2835 | { |
2836 | ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX }; |
2837 | struct task_struct *tsk = current; |
2838 | sigset_t mask = *which; |
2839 | int sig, ret = 0; |
2840 | |
2841 | if (ts) { |
2842 | if (!timespec_valid(ts)) |
2843 | return -EINVAL; |
2844 | timeout = timespec_to_ktime(*ts); |
2845 | to = &timeout; |
2846 | } |
2847 | |
2848 | /* |
2849 | * Invert the set of allowed signals to get those we want to block. |
2850 | */ |
2851 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2852 | signotset(&mask); |
2853 | |
2854 | spin_lock_irq(&tsk->sighand->siglock); |
2855 | sig = dequeue_signal(tsk, &mask, info); |
2856 | if (!sig && timeout.tv64) { |
2857 | /* |
2858 | * None ready: temporarily unblock the signals we're interested
2859 | * in while we are sleeping, so that we'll be awakened when
2860 | * they arrive. Unblocking is always fine; we can avoid
2861 | * set_current_blocked(). |
2862 | */ |
2863 | tsk->real_blocked = tsk->blocked; |
2864 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); |
2865 | recalc_sigpending(); |
2866 | spin_unlock_irq(&tsk->sighand->siglock); |
2867 | |
2868 | __set_current_state(TASK_INTERRUPTIBLE); |
2869 | ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, |
2870 | HRTIMER_MODE_REL); |
2871 | spin_lock_irq(&tsk->sighand->siglock); |
2872 | __set_task_blocked(tsk, &tsk->real_blocked); |
2873 | sigemptyset(&tsk->real_blocked); |
2874 | sig = dequeue_signal(tsk, &mask, info); |
2875 | } |
2876 | spin_unlock_irq(&tsk->sighand->siglock); |
2877 | |
2878 | if (sig) |
2879 | return sig; |
2880 | return ret ? -EINTR : -EAGAIN; |
2881 | } |
2882 | |
2883 | /** |
2884 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified |
2885 | * in @uthese |
2886 | * @uthese: queued signals to wait for |
2887 | * @uinfo: if non-null, the signal's siginfo is returned here |
2888 | * @uts: upper bound on process time suspension |
2889 | * @sigsetsize: size of sigset_t type |
2890 | */ |
2891 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
2892 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
2893 | size_t, sigsetsize) |
2894 | { |
2895 | sigset_t these; |
2896 | struct timespec ts; |
2897 | siginfo_t info; |
2898 | int ret; |
2899 | |
2900 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2901 | if (sigsetsize != sizeof(sigset_t)) |
2902 | return -EINVAL; |
2903 | |
2904 | if (copy_from_user(&these, uthese, sizeof(these))) |
2905 | return -EFAULT; |
2906 | |
2907 | if (uts) { |
2908 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2909 | return -EFAULT; |
2910 | } |
2911 | |
2912 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
2913 | |
2914 | if (ret > 0 && uinfo) { |
2915 | if (copy_siginfo_to_user(uinfo, &info)) |
2916 | ret = -EFAULT; |
2917 | } |
2918 | |
2919 | return ret; |
2920 | } |
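
/*
 * User-space sketch (not kernel code) of the syscall above via the
 * glibc sigtimedwait() wrapper: wait up to one second for SIGTERM.
 * The signal must be blocked first so it stays queued rather than
 * being delivered. Error handling elided.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int wait_for_sigterm(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		return sigtimedwait(&set, &info, &ts);
 *	}
 */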
2921 | |
2922 | /** |
2923 | * sys_kill - send a signal to a process |
2924 | * @pid: the PID of the process |
2925 | * @sig: signal to be sent |
2926 | */ |
2927 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
2928 | { |
2929 | struct siginfo info; |
2930 | |
2931 | info.si_signo = sig; |
2932 | info.si_errno = 0; |
2933 | info.si_code = SI_USER; |
2934 | info.si_pid = task_tgid_vnr(current); |
2935 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
2936 | |
2937 | return kill_something_info(sig, &info, pid); |
2938 | } |
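
/*
 * User-space sketch (not kernel code): signal 0 exercises only the
 * permission and existence checks, with nothing delivered, so it
 * doubles as a liveness probe. Assumes the glibc wrapper.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	int process_exists(pid_t pid)
 *	{
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */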
2939 | |
2940 | static int |
2941 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) |
2942 | { |
2943 | struct task_struct *p; |
2944 | int error = -ESRCH; |
2945 | |
2946 | rcu_read_lock(); |
2947 | p = find_task_by_vpid(pid); |
2948 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
2949 | error = check_kill_permission(sig, info, p); |
2950 | /* |
2951 | * The null signal is a permissions and process existence |
2952 | * probe. No signal is actually delivered. |
2953 | */ |
2954 | if (!error && sig) { |
2955 | error = do_send_sig_info(sig, info, p, false); |
2956 | /* |
2957 | * If lock_task_sighand() failed we pretend the task |
2958 | * dies after receiving the signal. The window is tiny, |
2959 | * and the signal is private anyway. |
2960 | */ |
2961 | if (unlikely(error == -ESRCH)) |
2962 | error = 0; |
2963 | } |
2964 | } |
2965 | rcu_read_unlock(); |
2966 | |
2967 | return error; |
2968 | } |
2969 | |
2970 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
2971 | { |
2972 | struct siginfo info = {}; |
2973 | |
2974 | info.si_signo = sig; |
2975 | info.si_errno = 0; |
2976 | info.si_code = SI_TKILL; |
2977 | info.si_pid = task_tgid_vnr(current); |
2978 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
2979 | |
2980 | return do_send_specific(tgid, pid, sig, &info); |
2981 | } |
2982 | |
2983 | /** |
2984 | * sys_tgkill - send signal to one specific thread |
2985 | * @tgid: the thread group ID of the thread |
2986 | * @pid: the PID of the thread |
2987 | * @sig: signal to be sent |
2988 | * |
2989 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
2990 | * exists but no longer belongs to the target process. This
2991 | * method solves the problem of threads exiting and PIDs getting reused. |
2992 | */ |
2993 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
2994 | { |
2995 | /* This is only valid for single tasks */ |
2996 | if (pid <= 0 || tgid <= 0) |
2997 | return -EINVAL; |
2998 | |
2999 | return do_tkill(tgid, pid, sig); |
3000 | } |
3001 | |
3002 | /** |
3003 | * sys_tkill - send signal to one specific task |
3004 | * @pid: the PID of the task |
3005 | * @sig: signal to be sent |
3006 | * |
3007 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
3008 | */ |
3009 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
3010 | { |
3011 | /* This is only valid for single tasks */ |
3012 | if (pid <= 0) |
3013 | return -EINVAL; |
3014 | |
3015 | return do_tkill(0, pid, sig); |
3016 | } |
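
/*
 * User-space sketch (not kernel code) of directing a signal at one
 * thread. Assumption: no glibc wrapper is available, so the raw
 * syscall is used; @tid would come from gettid() in the target.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	int signal_thread(pid_t tgid, pid_t tid)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *	}
 */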
3017 | |
3018 | static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) |
3019 | { |
3020 | /* Not even root can pretend to send signals from the kernel. |
3021 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3022 | */ |
3023 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3024 | (task_pid_vnr(current) != pid)) |
3025 | return -EPERM; |
3026 | |
3027 | info->si_signo = sig; |
3028 | |
3029 | /* POSIX.1b doesn't mention process groups. */ |
3030 | return kill_proc_info(sig, info, pid); |
3031 | } |
3032 | |
3033 | /** |
3034 | * sys_rt_sigqueueinfo - send signal information to a process
3035 | * @pid: the PID of the process
3036 | * @sig: signal to be sent |
3037 | * @uinfo: signal info to be sent |
3038 | */ |
3039 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
3040 | siginfo_t __user *, uinfo) |
3041 | { |
3042 | siginfo_t info; |
3043 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
3044 | return -EFAULT; |
3045 | return do_rt_sigqueueinfo(pid, sig, &info); |
3046 | } |
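
/*
 * User-space sketch (not kernel code): glibc's sigqueue() is built on
 * the syscall above and carries a word of payload in si_value, which
 * the receiver sees in its siginfo_t. Error handling elided.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	int send_value(pid_t pid, int val)
 *	{
 *		union sigval sv = { .sival_int = val };
 *
 *		return sigqueue(pid, SIGUSR1, sv);
 *	}
 */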
3047 | |
3048 | #ifdef CONFIG_COMPAT |
3049 | COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, |
3050 | compat_pid_t, pid, |
3051 | int, sig, |
3052 | struct compat_siginfo __user *, uinfo) |
3053 | { |
3054 | siginfo_t info = {}; |
3055 | int ret = copy_siginfo_from_user32(&info, uinfo); |
3056 | if (unlikely(ret)) |
3057 | return ret; |
3058 | return do_rt_sigqueueinfo(pid, sig, &info); |
3059 | } |
3060 | #endif |
3061 | |
3062 | static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
3063 | { |
3064 | /* This is only valid for single tasks */ |
3065 | if (pid <= 0 || tgid <= 0) |
3066 | return -EINVAL; |
3067 | |
3068 | /* Not even root can pretend to send signals from the kernel. |
3069 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3070 | */ |
3071 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3072 | (task_pid_vnr(current) != pid)) |
3073 | return -EPERM; |
3074 | |
3075 | info->si_signo = sig; |
3076 | |
3077 | return do_send_specific(tgid, pid, sig, info); |
3078 | } |
3079 | |
3080 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, |
3081 | siginfo_t __user *, uinfo) |
3082 | { |
3083 | siginfo_t info; |
3084 | |
3085 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
3086 | return -EFAULT; |
3087 | |
3088 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
3089 | } |
3090 | |
3091 | #ifdef CONFIG_COMPAT |
3092 | COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, |
3093 | compat_pid_t, tgid, |
3094 | compat_pid_t, pid, |
3095 | int, sig, |
3096 | struct compat_siginfo __user *, uinfo) |
3097 | { |
3098 | siginfo_t info = {}; |
3099 | |
3100 | if (copy_siginfo_from_user32(&info, uinfo)) |
3101 | return -EFAULT; |
3102 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
3103 | } |
3104 | #endif |
3105 | |
3106 | /* |
3107 | * For kthreads only, must not be used if cloned with CLONE_SIGHAND |
3108 | */ |
3109 | void kernel_sigaction(int sig, __sighandler_t action) |
3110 | { |
3111 | spin_lock_irq(&current->sighand->siglock);
3112 | current->sighand->action[sig - 1].sa.sa_handler = action; |
3113 | if (action == SIG_IGN) { |
3114 | sigset_t mask; |
3115 | |
3116 | sigemptyset(&mask); |
3117 | sigaddset(&mask, sig); |
3118 | |
3119 | flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3120 | flush_sigqueue_mask(&mask, &current->pending);
3121 | recalc_sigpending(); |
3122 | } |
3123 | spin_unlock_irq(&current->sighand->siglock);
3124 | } |
3125 | EXPORT_SYMBOL(kernel_sigaction); |
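
/*
 * In-kernel sketch: kthreads normally reach kernel_sigaction()
 * through the allow_signal()/disallow_signal() helpers. A daemon
 * thread that wants to react to SIGTERM might, for example, do:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			break;
 *		schedule_timeout_interruptible(HZ);
 *	}
 */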
3126 | |
3127 | void __weak sigaction_compat_abi(struct k_sigaction *act, |
3128 | struct k_sigaction *oact) |
3129 | { |
3130 | } |
3131 | |
3132 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
3133 | { |
3134 | struct task_struct *p = current, *t; |
3135 | struct k_sigaction *k; |
3136 | sigset_t mask; |
3137 | |
3138 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
3139 | return -EINVAL; |
3140 | |
3141 | k = &p->sighand->action[sig-1]; |
3142 | |
3143 | spin_lock_irq(&p->sighand->siglock); |
3144 | if (oact) |
3145 | *oact = *k; |
3146 | |
3147 | sigaction_compat_abi(act, oact); |
3148 | |
3149 | if (act) { |
3150 | sigdelsetmask(&act->sa.sa_mask, |
3151 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
3152 | *k = *act; |
3153 | /* |
3154 | * POSIX 3.3.1.3: |
3155 | * "Setting a signal action to SIG_IGN for a signal that is |
3156 | * pending shall cause the pending signal to be discarded, |
3157 | * whether or not it is blocked." |
3158 | * |
3159 | * "Setting a signal action to SIG_DFL for a signal that is |
3160 | * pending and whose default action is to ignore the signal |
3161 | * (for example, SIGCHLD), shall cause the pending signal to |
3162 | * be discarded, whether or not it is blocked."
3163 | */ |
3164 | if (sig_handler_ignored(sig_handler(p, sig), sig)) { |
3165 | sigemptyset(&mask); |
3166 | sigaddset(&mask, sig); |
3167 | flush_sigqueue_mask(&mask, &p->signal->shared_pending); |
3168 | for_each_thread(p, t) |
3169 | flush_sigqueue_mask(&mask, &t->pending); |
3170 | } |
3171 | } |
3172 | |
3173 | spin_unlock_irq(&p->sighand->siglock); |
3174 | return 0; |
3175 | } |
3176 | |
3177 | static int |
3178 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp, |
3179 | size_t min_ss_size) |
3180 | { |
3181 | stack_t oss; |
3182 | int error; |
3183 | |
3184 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
3185 | oss.ss_size = current->sas_ss_size; |
3186 | oss.ss_flags = sas_ss_flags(sp) | |
3187 | (current->sas_ss_flags & SS_FLAG_BITS); |
3188 | |
3189 | if (uss) { |
3190 | void __user *ss_sp; |
3191 | size_t ss_size; |
3192 | unsigned ss_flags; |
3193 | int ss_mode; |
3194 | |
3195 | error = -EFAULT; |
3196 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
3197 | goto out; |
3198 | error = __get_user(ss_sp, &uss->ss_sp) | |
3199 | __get_user(ss_flags, &uss->ss_flags) | |
3200 | __get_user(ss_size, &uss->ss_size); |
3201 | if (error) |
3202 | goto out; |
3203 | |
3204 | error = -EPERM; |
3205 | if (on_sig_stack(sp)) |
3206 | goto out; |
3207 | |
3208 | ss_mode = ss_flags & ~SS_FLAG_BITS; |
3209 | error = -EINVAL; |
3210 | if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && |
3211 | ss_mode != 0) |
3212 | goto out; |
3213 | |
3214 | if (ss_mode == SS_DISABLE) { |
3215 | ss_size = 0; |
3216 | ss_sp = NULL; |
3217 | } else { |
3218 | if (unlikely(ss_size < min_ss_size)) |
3219 | return -ENOMEM; |
3220 | } |
3221 | |
3222 | current->sas_ss_sp = (unsigned long) ss_sp; |
3223 | current->sas_ss_size = ss_size; |
3224 | current->sas_ss_flags = ss_flags; |
3225 | } |
3226 | |
3227 | error = 0; |
3228 | if (uoss) { |
3229 | error = -EFAULT; |
3230 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
3231 | goto out; |
3232 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | |
3233 | __put_user(oss.ss_size, &uoss->ss_size) | |
3234 | __put_user(oss.ss_flags, &uoss->ss_flags); |
3235 | } |
3236 | |
3237 | out: |
3238 | return error; |
3239 | } |
3240 | SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) |
3241 | { |
3242 | return do_sigaltstack(uss, uoss, current_user_stack_pointer(), |
3243 | MINSIGSTKSZ); |
3244 | } |
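
/*
 * User-space sketch (not kernel code) of the syscall above: install
 * an alternate stack so that a handler registered with SA_ONSTACK
 * (e.g. for SIGSEGV after stack overflow) has somewhere to run.
 * Error handling elided.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	void setup_altstack(void)
 *	{
 *		stack_t ss;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		sigaltstack(&ss, NULL);
 *	}
 */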
3245 | |
3246 | int restore_altstack(const stack_t __user *uss) |
3247 | { |
3248 | int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(), |
3249 | MINSIGSTKSZ); |
3250 | /* squash all but -EFAULT for now */
3251 | return err == -EFAULT ? err : 0; |
3252 | } |
3253 | |
3254 | int __save_altstack(stack_t __user *uss, unsigned long sp) |
3255 | { |
3256 | struct task_struct *t = current; |
3257 | int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | |
3258 | __put_user(t->sas_ss_flags, &uss->ss_flags) | |
3259 | __put_user(t->sas_ss_size, &uss->ss_size); |
3260 | if (err) |
3261 | return err; |
3262 | if (t->sas_ss_flags & SS_AUTODISARM) |
3263 | sas_ss_reset(t); |
3264 | return 0; |
3265 | } |
3266 | |
3267 | #ifdef CONFIG_COMPAT |
3268 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
3269 | const compat_stack_t __user *, uss_ptr, |
3270 | compat_stack_t __user *, uoss_ptr) |
3271 | { |
3272 | stack_t uss, uoss; |
3273 | int ret; |
3274 | mm_segment_t seg; |
3275 | |
3276 | if (uss_ptr) { |
3277 | compat_stack_t uss32; |
3278 | |
3279 | memset(&uss, 0, sizeof(stack_t)); |
3280 | if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) |
3281 | return -EFAULT; |
3282 | uss.ss_sp = compat_ptr(uss32.ss_sp); |
3283 | uss.ss_flags = uss32.ss_flags; |
3284 | uss.ss_size = uss32.ss_size; |
3285 | } |
3286 | seg = get_fs(); |
3287 | set_fs(KERNEL_DS); |
3288 | ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), |
3289 | (stack_t __force __user *) &uoss, |
3290 | compat_user_stack_pointer(), |
3291 | COMPAT_MINSIGSTKSZ); |
3292 | set_fs(seg); |
3293 | if (ret >= 0 && uoss_ptr) { |
3294 | if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) || |
3295 | __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || |
3296 | __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || |
3297 | __put_user(uoss.ss_size, &uoss_ptr->ss_size)) |
3298 | ret = -EFAULT; |
3299 | } |
3300 | return ret; |
3301 | } |
3302 | |
3303 | int compat_restore_altstack(const compat_stack_t __user *uss) |
3304 | { |
3305 | int err = compat_sys_sigaltstack(uss, NULL); |
3306 | /* squash all but -EFAULT for now */ |
3307 | return err == -EFAULT ? err : 0; |
3308 | } |
3309 | |
3310 | int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) |
3311 | { |
3312 | int err; |
3313 | struct task_struct *t = current; |
3314 | err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), |
3315 | &uss->ss_sp) | |
3316 | __put_user(t->sas_ss_flags, &uss->ss_flags) | |
3317 | __put_user(t->sas_ss_size, &uss->ss_size); |
3318 | if (err) |
3319 | return err; |
3320 | if (t->sas_ss_flags & SS_AUTODISARM) |
3321 | sas_ss_reset(t); |
3322 | return 0; |
3323 | } |
3324 | #endif |
3325 | |
3326 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
3327 | |
3328 | /** |
3329 | * sys_sigpending - examine pending signals |
3330 | * @set: where mask of pending signal is returned |
3331 | */ |
3332 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
3333 | { |
3334 | return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); |
3335 | } |
3336 | |
3337 | #endif |

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
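
/*
 * Example (illustrative, userspace): the classic sigprocmask() pattern
 * backed by this syscall - block SIGINT around a critical region, then
 * restore the previous mask. do_critical_work() is hypothetical.
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	do_critical_work();
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */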

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be changed
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
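
/*
 * Example (illustrative, userspace): installing a handler through the
 * sigaction() wrapper over this syscall; on_sigint is a hypothetical
 * handler. SA_RESTART asks for interrupted syscalls to be restarted.
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_sigint;
 *	sa.sa_flags = SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGINT, &sa, NULL);
 */
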
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	/* SysV semantics: one-shot handler, signal not blocked during delivery */
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
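
/*
 * Example (illustrative, userspace): since SA_ONESHOT resets the
 * disposition to SIG_DFL on delivery, handlers installed via signal()
 * with SysV semantics traditionally re-arm themselves first;
 * reload_config() is hypothetical.
 *
 *	void on_sighup(int sig)
 *	{
 *		signal(SIGHUP, on_sighup);
 *		reload_config();
 *	}
 */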

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	/*
	 * ERESTARTNOHAND restarts the syscall only when no handler was
	 * invoked, so a caught signal makes pause() return -EINTR to
	 * userspace, as POSIX requires.
	 */
	return -ERESTARTNOHAND;
}

#endif
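
/*
 * Example (illustrative, userspace): pause() blocks until a handled
 * signal arrives, then fails with EINTR - the visible result of the
 * -ERESTARTNOHAND return above.
 *
 *	if (pause() == -1 && errno == EINTR)
 *		...a handler ran...
 */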

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	/* the saved mask is restored on the way back to userspace */
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
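
/*
 * Example (illustrative, userspace): the race-free "block, test, wait"
 * idiom this primitive exists for; child_exited is a hypothetical
 * volatile sig_atomic_t flag set by a SIGCHLD handler.
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_exited)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */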

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian the compat and native bitmap layouts match,
	   so word granularity doesn't matter */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		     != offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING; sending a signal from "
			   "kdb risks deadlock on the run queue locks.\n"
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */