/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_SYNCHRONOUS    = 0x02,
};

struct call_function_data {
        struct call_single_data __percpu *csd;
        cpumask_var_t           cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        cfd->csd = alloc_percpu(struct call_single_data);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_percpu(cfd->csd);
        return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting,
 * as we'll have to ensure that no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}
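
/*
 * Illustrative lifecycle of a csd (a sketch for exposition only, mirroring
 * what generic_exec_single() below does; nothing in this file calls it):
 * the sender takes the lock, fills in the payload, and queues the csd; the
 * target CPU runs the callback and releases the lock, after which the
 * sender may reuse the structure.
 */
static void __maybe_unused
csd_lifecycle_sketch(struct call_single_data *csd, int cpu,
                     smp_call_func_t func, void *info)
{
        csd_lock(csd);                  /* wait out any previous user */
        csd->func = func;               /* ordered after ->flags by smp_wmb() */
        csd->info = info;
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);
        /* flush_smp_call_function_queue() on @cpu will csd_unlock() it */
}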

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
                               smp_call_func_t func, void *info)
{
        if (cpu == smp_processor_id()) {
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU.
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        csd->func = func;
        csd->info = info;

        /*
         * The list addition should be visible before the IPI is sent, so
         * that the handler, which locks the list to pull entries off it,
         * sees it; this follows from the normal cache coherency rules
         * implied by spinlocks.
         *
         * If, on some architecture, IPIs can arrive out of order with
         * respect to the cache coherency protocol, sufficient
         * synchronisation should be added to arch code to make it appear
         * to obey cache coherency WRT locking and barrier primitives.
         * Generic code isn't really equipped to do the right thing...
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        struct llist_head *head;
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;
        static bool warned;

        WARN_ON(!irqs_disabled());

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist)
                        pr_warn("IPI callback %pS sent to offline CPU\n",
                                csd->func);
        }

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;

                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
                        func(info);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
                        func(info);
                }
        }

        /*
         * Handle irq works queued remotely by irq_work_queue_on().
         * Smp functions above are typically synchronous, so they should
         * run first, since some other CPUs may be busy waiting for them.
         */
        irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        struct call_single_data *csd;
        struct call_single_data csd_stack = {
                .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online, though, as no one else
         * can send an smp call function interrupt to this CPU and so
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        err = generic_exec_single(cpu, csd, func, info);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
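
/*
 * A minimal usage sketch (illustrative only; the callback and wrapper names
 * are hypothetical): run a fast, non-sleeping callback on one specific CPU
 * and wait for it. The callback executes in IPI context with interrupts
 * disabled, so the on-stack atomic_t stays valid for its whole lifetime.
 */
static void example_bump(void *info)
{
        atomic_inc((atomic_t *)info);   /* runs on the target CPU */
}

static int __maybe_unused example_bump_on(int cpu)
{
        atomic_t hits = ATOMIC_INIT(0);

        /* wait == 1: don't return until example_bump() ran on @cpu */
        return smp_call_function_single(cpu, example_bump, &hits, 1);
}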

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
        int err = 0;

        preempt_disable();

        /* We could deadlock if we have to wait here with interrupts disabled! */
        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
                csd_lock_wait(csd);

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd, csd->func, csd->info);
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
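
/*
 * Sketch of the intended async usage (illustrative; "struct foo" and both
 * functions are hypothetical): embed the csd in a longer-lived object and
 * only reissue a call once the previous one has completed, since the csd
 * must not be touched while CSD_FLAG_LOCK is still set.
 */
struct foo {
        struct call_single_data csd;
        atomic_t                value;
};

static void foo_update(void *info)
{
        atomic_inc(&((struct foo *)info)->value);       /* on the target CPU */
}

static int __maybe_unused foo_kick(struct foo *f, int cpu)
{
        f->csd.func = foo_update;
        f->csd.info = f;

        /* Does not wait, so this is safe with interrupts disabled. */
        return smp_call_function_single_async(cpu, &f->csd);
}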

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
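
/*
 * Usage sketch (illustrative; both functions are hypothetical): run a
 * callback on whichever CPU of @mask is cheapest to reach, preferring
 * the current CPU, then the current node.
 */
static void example_ping(void *info)
{
        pr_info("ping handled on CPU %d\n", smp_processor_id());
}

static int __maybe_unused example_ping_any(const struct cpumask *mask)
{
        return smp_call_function_any(mask, example_ping, NULL, 1);
}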

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online, though, as no one else
         * can send an smp call function interrupt to this CPU and so
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus? We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        for_each_cpu(cpu, cfd->cpumask) {
                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
                csd->func = func;
                csd->info = info;
                llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        struct call_single_data *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);
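
/*
 * Usage sketch (illustrative; names are hypothetical): broadcast to a
 * cpumask with preemption disabled. Note that the caller itself is never
 * targeted, so local work must be done by hand (as on_each_cpu_mask()
 * below does).
 */
static void example_count(void *info)
{
        atomic_inc((atomic_t *)info);
}

static int __maybe_unused example_count_others(const struct cpumask *mask)
{
        atomic_t n = ATOMIC_INIT(0);

        preempt_disable();
        smp_call_function_many(mask, example_count, &n, true);
        preempt_enable();

        return atomic_read(&n);
}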

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
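
/*
 * Usage sketch (illustrative; reuses the hypothetical example_count() from
 * above): count how many other CPUs answered a synchronous broadcast.
 */
static int __maybe_unused example_count_all_others(void)
{
        atomic_t n = ATOMIC_INIT(0);

        smp_call_function(example_count, &n, 1);        /* skips this CPU */
        return atomic_read(&n);
}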

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
        printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        unsigned int cpu;

        idle_threads_init();
        cpuhp_threads_init();

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        /* Final decision about SMT support */
        cpu_smt_check_topology();
        /* Any cleanup work */
        smp_announce();
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
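
/*
 * Usage sketch (illustrative; both functions are hypothetical): unlike
 * smp_call_function(), the callback also runs on the local CPU, with
 * interrupts disabled in both the local and the remote case.
 */
static void example_report(void *info)
{
        pr_info("running on CPU %d\n", smp_processor_id());
}

static void __maybe_unused example_report_all(void)
{
        on_each_cpu(example_report, NULL, 1);
}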

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
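
/*
 * Usage sketch (illustrative; reuses the hypothetical example_report()):
 * the local CPU is included when, and only when, it is part of @mask.
 */
static void __maybe_unused example_report_node(int node)
{
        on_each_cpu_mask(cpumask_of_node(node), example_report, NULL, true);
}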

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *              used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                      smp_call_func_t func, void *info, bool wait,
                      gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                               info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);
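
/*
 * Usage sketch (illustrative; the predicate is hypothetical, and
 * example_report() is the hypothetical callback from above): IPI only the
 * CPUs that a per-cpu predicate selects. Real callers typically test
 * per-cpu state, e.g. whether a per-cpu cache has anything to flush.
 */
static bool example_is_odd_cpu(int cpu, void *info)
{
        return cpu & 1;         /* called with preemption disabled */
}

static void __maybe_unused example_kick_odd_cpus(void)
{
        on_each_cpu_cond(example_is_odd_cpu, example_report, NULL,
                         true, GFP_KERNEL);
}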

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
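
/*
 * Usage sketch (illustrative; the pointer slot is hypothetical): publish a
 * new function pointer first, then kick, so that once this returns no CPU
 * can still be running through the old pointer.
 */
static void __maybe_unused example_switch_handler(void (**slot)(void),
                                                  void (*new_fn)(void))
{
        WRITE_ONCE(*slot, new_fn);      /* publish the new pointer */
        kick_all_cpus_sync();           /* smp_mb() + dummy IPI to all CPUs */
}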

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break all CPUs out of idle, including CPUs that are
 * idle-polling; non-idle CPUs are left alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
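
/*
 * Usage sketch (illustrative; both functions are hypothetical): unlike the
 * IPI-based calls above, the callback runs from a workqueue on @cpu, so it
 * may sleep and can return a value.
 */
static int example_probe(void *data)
{
        might_sleep();                  /* process context, sleeping is fine */
        return raw_smp_processor_id();
}

static int __maybe_unused example_probe_cpu(unsigned int cpu)
{
        /* phys == false: no vcpu pinning requested */
        return smp_call_on_cpu(cpu, example_probe, NULL, false);
}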