path: root/kernel/kthread.c (plain)
blob: fbc230e4196905504cc0a0ccd5d9b74e6c926bab
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork) && try_get_task_stack(k))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

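/*
 * Illustrative sketch, not part of the original file: the canonical
 * consumer loop for kthread_should_stop(). The function name and the
 * work placeholder are hypothetical. The state is set before the stop
 * check so a wakeup from kthread_stop() cannot be missed.
 */
static int __maybe_unused example_stop_loop_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* Sleep until woken. */
		__set_current_state(TASK_RUNNING);
		/* ... do one unit of work here (hypothetical) ... */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;	/* Passed through to kthread_stop(). */
}
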
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked state.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

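/*
 * Illustrative sketch, not part of the original file: a thread function
 * that honors both stop and park requests. Names are hypothetical.
 */
static int __maybe_unused example_park_aware_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* Blocks until kthread_unpark(). */
		/* ... per-iteration work (hypothetical) ... */
		cond_resched();
	}
	return 0;
}
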
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

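/*
 * Illustrative sketch, not part of the original file: a freezable loop
 * built on kthread_freezable_should_stop(). Names are hypothetical.
 */
static int __maybe_unused example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();	/* Clear PF_NOFREEZE so the freezer sees us. */
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("resumed after system freeze\n");
		/* ... work (hypothetical) ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
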
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

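/*
 * Illustrative sketch, not part of the original file: the @data pointer
 * handed to kthread_create() is what kthread_data() returns later. The
 * context struct and names are hypothetical.
 */
struct example_ctx {
	int id;
};

static int __maybe_unused example_ctx_fn(void *data)
{
	struct example_ctx *ctx = data;

	/* Inside the thread, kthread_data(current) is the same pointer. */
	WARN_ON(kthread_data(current) != ctx);

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}
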
static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular CPU, give the CPU's
 * node in @node to get NUMA affinity for the kthread stack; otherwise give
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data as
 * its argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

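/*
 * Illustrative sketch, not part of the original file: the usual
 * create/wake/stop lifecycle, reusing the hypothetical example_ctx_fn()
 * above. kthread_run() from <linux/kthread.h> folds the first two steps
 * into one call.
 */
static int __maybe_unused example_start_stop(struct example_ctx *ctx)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_ctx_fn, ctx, NUMA_NO_NODE,
				     "example/%d", ctx->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	wake_up_process(tsk);		/* The new thread starts stopped. */

	/* ... later, e.g. on module exit ... */
	return kthread_stop(tsk);	/* Returns example_ctx_fn()'s value. */
}
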
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

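/*
 * Illustrative sketch, not part of the original file: binding a
 * just-created thread before its first wakeup, as the comment above
 * requires. Names are hypothetical.
 */
static struct task_struct *__maybe_unused
example_create_bound(int (*fn)(void *), void *arg, unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(fn, arg, cpu_to_node(cpu),
				     "example_bound/%u", cpu);
	if (!IS_ERR(tsk)) {
		kthread_bind(tsk, cpu);	/* Thread is still stopped here. */
		wake_up_process(tsk);
	}
	return tsk;
}
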
/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind the thread again when unparking it. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread) {
		__kthread_unpark(k, kthread);
		put_task_stack(k);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		put_task_stack(k);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);

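/*
 * Illustrative sketch, not part of the original file: pausing and
 * resuming a thread whose function calls kthread_parkme() when
 * kthread_should_park() is true (like the hypothetical
 * example_park_aware_fn() above).
 */
static void __maybe_unused example_pause_resume(struct task_struct *tsk)
{
	if (!kthread_park(tsk)) {
		/* @tsk is now blocked in TASK_PARKED. */
		kthread_unpark(tsk);	/* Let it run again. */
	}
}
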
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
		put_task_stack(k);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main loop of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Work items must not hold any locks or leave preemption or interrupts
 * disabled when they finish. A safe point for freezing is provided after
 * one work item finishes and before the next one is started.
 *
 * Also, a work item must not be handled by more than one worker at the same
 * time; see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0) {
		char name[TASK_COMM_LEN];

		/*
		 * kthread_create_worker_on_cpu() allows passing a generic
		 * namefmt, unlike kthread_create_on_cpu(). We need to
		 * format it here.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		task = kthread_create_on_cpu(kthread_worker_fn, worker,
					     cpu, name);
	} else {
		task = __kthread_create_on_node(kthread_worker_fn, worker,
						-1, namefmt, args);
	}

	if (IS_ERR(task))
		goto fail_task;

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

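/*
 * Illustrative sketch, not part of the original file: a complete worker
 * round trip. The item struct and handler are hypothetical;
 * kthread_init_work() comes from <linux/kthread.h>.
 */
struct example_item {
	struct kthread_work work;
	int payload;
};

static void example_work_fn(struct kthread_work *work)
{
	struct example_item *item =
		container_of(work, struct example_item, work);

	pr_info("processed payload %d\n", item->payload);
}

static int __maybe_unused example_use_worker(void)
{
	struct kthread_worker *worker;
	struct example_item item = { .payload = 42 };

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&item.work, example_work_fn);
	kthread_queue_work(worker, &item.work);
	kthread_flush_work(&item.work);	/* item is on the stack: wait. */
	kthread_destroy_worker(worker);
	return 0;
}
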
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work cannot be queued at the moment.
 * This happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution. @worker must have been
 * created with kthread_create_worker(). Returns %true if @work was
 * successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @__data: pointer to the data associated with the timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from an irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(unsigned long __data)
{
	struct kthread_delayed_work *dwork =
		(struct kthread_delayed_work *)__data;
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);

	/*
	 * If @delay is 0, queue @dwork->work immediately. This is for
	 * both optimization and correctness. The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer_stats_timer_set_start_info(&dwork->timer);
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work was already pending, meaning that either
 * its timer was running or the work was queued; %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);

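/*
 * Illustrative sketch, not part of the original file: arming a delayed
 * work item. The handler and the static dwork are hypothetical;
 * kthread_init_delayed_work() comes from <linux/kthread.h>.
 */
static void example_delayed_fn(struct kthread_work *work)
{
	pr_info("delayed work ran\n");
}

static struct kthread_delayed_work example_dwork;

static void __maybe_unused example_arm_delayed(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_delayed_fn);
	/* Runs example_delayed_fn() on @worker about one second from now. */
	kthread_queue_delayed_work(worker, &example_dwork, HZ);
}
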
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work item processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

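/*
 * Illustrative sketch, not part of the original file: a watchdog-style
 * rearm. Each call pushes the pending invocation back by @timeout
 * jiffies, queueing it fresh when it was idle. Reuses the hypothetical
 * example_dwork above.
 */
static void __maybe_unused example_touch_watchdog(struct kthread_worker *worker,
						  unsigned long timeout)
{
	/* Safe from IRQ context as well, per the comment above. */
	kthread_mod_delayed_work(worker, &example_dwork, timeout);
}
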
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish. This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker. The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios. There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
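
/*
 * Illustrative sketch, not part of the original file: an orderly
 * teardown of the hypothetical objects from the sketches above. Cancel
 * stragglers first, then destroy the worker, which flushes the
 * remaining work and stops the task.
 */
static void __maybe_unused example_teardown(struct kthread_worker *worker)
{
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_destroy_worker(worker);
}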