/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

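/* initialize @done to wait for @nr_todo stopper invocations */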
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

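/* queue @work to @stopper; the caller must hold @stopper->lock */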
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work)
{
	list_add_tail(&work->list, &stopper->works);
	wake_up_process(stopper->thread);
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

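/*
 * Advance the state machine to @newstate. The ack counter is reset
 * before the new state is published so that the last CPU to ack the
 * previous state cannot race past the new one.
 */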
static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled. Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

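/*
 * Queue @work1 on @cpu1 and @work2 on @cpu2, or fail both if either
 * stopper is disabled. The caller orders cpu1 < cpu2 so that the two
 * stopper locks always nest in the same order.
 */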
static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	int err;
retry:
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	err = -ENOENT;
	if (!stopper1->enabled || !stopper2->enabled)
		goto unlock;
	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	err = -EDEADLK;
	if (unlikely(stop_cpus_in_progress))
		goto unlock;

	err = 0;
	__cpu_stop_queue_work(stopper1, work1);
	__cpu_stop_queue_work(stopper2, work2);
unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		while (stop_cpus_in_progress)
			cpu_relax();
		goto retry;
	}
	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both cpus and runs @fn on one of them.
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}

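/*
 * Queue the per-cpu stop works for all cpus in @cpumask. Returns true
 * if at least one work was queued, i.e. at least one target cpu had
 * its stopper enabled.
 */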
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

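/* queue stop works on @cpumask and wait for all of them to finish */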
static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

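/* smpboot callback: does this cpu's stopper thread have work pending? */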
static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

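/*
 * Main loop of the per-cpu stopper thread: pop pending works one at a
 * time and execute them with the preempt count raised, propagating any
 * non-zero return value to the shared cpu_stop_done.
 */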
static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

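/* smpboot callback: register the newly created thread as this cpu's stop task */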
static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

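/* smpboot park callback: all queued works must have been flushed by now */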
static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

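/* re-enable the stopper for @cpu and let its parked thread run again */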
void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

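/*
 * Core of stop_machine(): set up the multi-stop state machine and stop
 * all online cpus. Before the stopper threads exist (early boot, only
 * one cpu) just run @fn directly with irqs disabled.
 */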
static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

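/*
 * stop_machine() proper: exclude cpu hotplug via get_online_cpus() so the
 * set of online cpus is stable while __stop_machine() runs. Might sleep.
 */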
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}