/*	$NetBSD: subr_ipi.c,v 1.2.6.3 2017/12/03 11:38:45 jdolecek Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inter-processor interrupt (IPI) interface: asynchronous IPIs to
 * invoke functions with a constant argument, and synchronous IPIs
 * with cross-call support.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.2.6.3 2017/12/03 11:38:45 jdolecek Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/ipi.h>
#include <sys/intr.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * An array of the IPI handlers used for asynchronous invocation.
 * The lock protects the slot allocation.
 */

typedef struct {
	ipi_func_t	func;
	void *		arg;
} ipi_intr_t;

static kmutex_t		ipi_mngmt_lock;
static ipi_intr_t	ipi_intrs[IPI_MAXREG]	__cacheline_aligned;

/*
 * Per-CPU mailbox for IPI messages: it is a single cache line storing
 * up to IPI_MSG_MAX messages.  This interface is built on top of the
 * synchronous IPIs.
 */

#define	IPI_MSG_SLOTS	(CACHE_LINE_SIZE / sizeof(ipi_msg_t *))
#define	IPI_MSG_MAX	IPI_MSG_SLOTS
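
/*
 * For example, with a 64-byte cache line and 8-byte pointers (a typical
 * LP64 configuration), IPI_MSG_SLOTS evaluates to 64 / 8 = 8, i.e. up
 * to eight outstanding messages per CPU mailbox.
 */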

typedef struct {
	ipi_msg_t *	msg[IPI_MSG_SLOTS];
} ipi_mbox_t;


/* Mailboxes for the synchronous IPIs. */
static ipi_mbox_t *	ipi_mboxes	__read_mostly;
static struct evcnt	ipi_mboxfull_ev	__cacheline_aligned;
static void		ipi_msg_cpu_handler(void *);

/* ID of the handler for the synchronous IPIs; it must be zero. */
#define	IPI_SYNCH_ID	0

#ifndef MULTIPROCESSOR
#define	cpu_ipi(ci)	KASSERT(ci == NULL)
#endif

void
ipi_sysinit(void)
{
	const size_t len = ncpu * sizeof(ipi_mbox_t);

	/* Initialise the per-CPU bit fields. */
	for (u_int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);
		memset(&ci->ci_ipipend, 0, sizeof(ci->ci_ipipend));
	}
	mutex_init(&ipi_mngmt_lock, MUTEX_DEFAULT, IPL_NONE);
	memset(ipi_intrs, 0, sizeof(ipi_intrs));

	/* Allocate per-CPU IPI mailboxes. */
	ipi_mboxes = kmem_zalloc(len, KM_SLEEP);
	KASSERT(ipi_mboxes != NULL);

	/*
	 * Register the handler for synchronous IPIs.  This mechanism
	 * is built on top of the asynchronous interface.  Slot zero is
	 * reserved permanently; it is also handy to use zero as the
	 * failure value for other registrations (as it is potentially
	 * less error-prone).
	 */
	ipi_intrs[IPI_SYNCH_ID].func = ipi_msg_cpu_handler;

	evcnt_attach_dynamic(&ipi_mboxfull_ev, EVCNT_TYPE_MISC, NULL,
	   "ipi", "full");
}

/*
 * ipi_register: register an asynchronous IPI handler.
 *
 * => Returns an IPI ID greater than zero on success, or zero on failure.
 */
u_int
ipi_register(ipi_func_t func, void *arg)
{
	mutex_enter(&ipi_mngmt_lock);
	for (u_int i = 0; i < IPI_MAXREG; i++) {
		if (ipi_intrs[i].func == NULL) {
			/* Register the function. */
			ipi_intrs[i].func = func;
			ipi_intrs[i].arg = arg;
			mutex_exit(&ipi_mngmt_lock);

			KASSERT(i != IPI_SYNCH_ID);
			return i;
		}
	}
	mutex_exit(&ipi_mngmt_lock);
	printf("WARNING: ipi_register: table full, increase IPI_MAXREG\n");
	return 0;
}
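
/*
 * Illustrative sketch (not part of this file): how a subsystem might
 * register an asynchronous IPI handler at attach time and later poke a
 * remote CPU with it.  All example_* names are hypothetical.
 */
#if 0
static u_int example_ipi_id;

static void
example_intr(void *arg)
{
	struct example_softc *sc = arg;	/* constant argument from ipi_register() */

	/* Runs on the target CPU, in IPI (interrupt) context. */
	example_handle_event(sc);
}

static void
example_attach(struct example_softc *sc)
{
	example_ipi_id = ipi_register(example_intr, sc);
	KASSERT(example_ipi_id != 0);	/* zero means the table is full */
}

static void
example_kick(struct cpu_info *ci)
{
	/* ipi_trigger() requires preemption disabled and a remote target. */
	kpreempt_disable();
	if (curcpu() != ci)
		ipi_trigger(example_ipi_id, ci);
	kpreempt_enable();
}
#endif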

/*
 * ipi_unregister: release the IPI handler given the ID.
 */
void
ipi_unregister(u_int ipi_id)
{
	ipi_msg_t ipimsg = { .func = (ipi_func_t)nullop };

	KASSERT(ipi_id != IPI_SYNCH_ID);
	KASSERT(ipi_id < IPI_MAXREG);

	/* Release the slot. */
	mutex_enter(&ipi_mngmt_lock);
	KASSERT(ipi_intrs[ipi_id].func != NULL);
	ipi_intrs[ipi_id].func = NULL;

	/* Ensure that there are no IPIs in flight. */
	kpreempt_disable();
	ipi_broadcast(&ipimsg);
	ipi_wait(&ipimsg);
	kpreempt_enable();
	mutex_exit(&ipi_mngmt_lock);
}

/*
 * ipi_trigger: asynchronously send an IPI to the specified CPU.
 */
void
ipi_trigger(u_int ipi_id, struct cpu_info *ci)
{
	const u_int i = ipi_id >> IPI_BITW_SHIFT;
	const uint32_t bitm = 1U << (ipi_id & IPI_BITW_MASK);

	KASSERT(ipi_id < IPI_MAXREG);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	/* Mark as pending and send an IPI. */
	if (membar_consumer(), (ci->ci_ipipend[i] & bitm) == 0) {
		atomic_or_32(&ci->ci_ipipend[i], bitm);
		cpu_ipi(ci);
	}
}
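
/*
 * Worked example of the pending-bit encoding above, assuming 32-bit
 * pending words (IPI_BITW_SHIFT of 5, IPI_BITW_MASK of 31), which is
 * consistent with the uint32_t operations used here: ipi_id 35 maps
 * to word i = 35 >> 5 = 1 and bit mask 1U << (35 & 31) = 1U << 3,
 * i.e. bit 3 of ci->ci_ipipend[1].
 */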

/*
 * ipi_trigger_multi: same as ipi_trigger(), but sends the IPI to all
 * CPUs in the given target set; if the local CPU is included in the
 * set, its IPI handler is invoked directly.
 */
void
ipi_trigger_multi(u_int ipi_id, const kcpuset_t *target)
{
	const cpuid_t selfid = cpu_index(curcpu());
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(kpreempt_disabled());
	KASSERT(target != NULL);

	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t cpuid = cpu_index(ci);

		if (!kcpuset_isset(target, cpuid) || cpuid == selfid) {
			continue;
		}
		ipi_trigger(ipi_id, ci);
	}
	if (kcpuset_isset(target, selfid)) {
		int s = splhigh();
		ipi_cpu_handler();
		splx(s);
	}
}

/*
 * put_msg: insert message into the mailbox.
 */
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;
again:
	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		if (__predict_true(mbox->msg[i] == NULL) &&
		    atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
			return;
		}
	}

	/* All slots are full: we have to spin-wait. */
	ipi_mboxfull_ev.ev_count++;
	SPINLOCK_BACKOFF(count);
	goto again;
}

/*
 * ipi_cpu_handler: the IPI handler.
 */
void
ipi_cpu_handler(void)
{
	struct cpu_info * const ci = curcpu();

	/*
	 * Handle asynchronous IPIs: inspect per-CPU bit field, extract
	 * IPI ID numbers and execute functions in those slots.
	 */
	for (u_int i = 0; i < IPI_BITWORDS; i++) {
		uint32_t pending, bit;

		if (ci->ci_ipipend[i] == 0) {
			continue;
		}
		pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_producer();
#endif
		while ((bit = ffs(pending)) != 0) {
			const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
			ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id];

			pending &= ~(1U << bit);
			KASSERT(ipi_hdl->func != NULL);
			ipi_hdl->func(ipi_hdl->arg);
		}
	}
}

/*
 * ipi_msg_cpu_handler: handle synchronous IPIs - iterate mailbox,
 * execute the passed functions and acknowledge the messages.
 */
static void
ipi_msg_cpu_handler(void *arg __unused)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = mbox->msg[i]) == NULL) {
			continue;
		}
		mbox->msg[i] = NULL;

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/* Ack the request. */
		atomic_dec_uint(&msg->_pending);
	}
}

/*
 * ipi_unicast: send an IPI to a single CPU.
 *
 * => The target CPU must be remote, i.e. it must not be the local CPU.
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
{
	const cpuid_t id = cpu_index(ci);

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	msg->_pending = 1;
	membar_producer();

	put_msg(&ipi_mboxes[id], msg);
	ipi_trigger(IPI_SYNCH_ID, ci);
}
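
/*
 * Illustrative sketch (not part of this file): a synchronous cross-call
 * to a single remote CPU using ipi_unicast() and ipi_wait().  All
 * example_* names are hypothetical.
 */
#if 0
static void
example_drain(void *arg)
{
	struct example_queue *q = arg;

	/* Runs on the remote CPU, in IPI context; keep it short. */
	example_flush_local(q);
}

static void
example_drain_remote(struct example_queue *q, struct cpu_info *ci)
{
	ipi_msg_t msg = { .func = example_drain, .arg = q };

	kpreempt_disable();
	KASSERT(curcpu() != ci);	/* the target must be remote */
	ipi_unicast(&msg, ci);
	ipi_wait(&msg);			/* spin until the remote CPU has run it */
	kpreempt_enable();
}
#endif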

/*
 * ipi_multicast: send an IPI to each CPU in the specified set.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int local;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	local = !!kcpuset_isset(target, cpu_index(self));
	msg->_pending = kcpuset_countset(target) - local;
	membar_producer();

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		if (!kcpuset_isset(target, id)) {
			continue;
		}
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}
	if (local) {
		msg->func(msg->arg);
	}
}
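
/*
 * Illustrative sketch (not part of this file): running a cross-call on
 * a subset of CPUs with ipi_multicast(); note that the handler may also
 * run directly on the local CPU if it is in the target set.  The
 * example_notify_set() name is hypothetical.
 */
#if 0
static void
example_notify_set(ipi_func_t func, void *arg, const kcpuset_t *target)
{
	ipi_msg_t msg = { .func = func, .arg = arg };

	kpreempt_disable();
	ipi_multicast(&msg, target);
	ipi_wait(&msg);
	kpreempt_enable();
}
#endif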

/*
 * ipi_broadcast: send an IPI to all CPUs.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_broadcast(ipi_msg_t *msg)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	msg->_pending = ncpu - 1;
	membar_producer();

	/* Broadcast IPIs for remote CPUs. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}

	/* Finally, execute locally. */
	msg->func(msg->arg);
}
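
/*
 * Illustrative sketch (not part of this file): using ipi_broadcast()
 * with a nullop handler to wait until every CPU has drained its IPI
 * mailbox, mirroring the quiescing technique used in ipi_unregister()
 * above.  The example_quiesce() name is hypothetical.
 */
#if 0
static void
example_quiesce(void)
{
	ipi_msg_t msg = { .func = (ipi_func_t)nullop };

	kpreempt_disable();
	ipi_broadcast(&msg);	/* runs nullop on every CPU, including this one */
	ipi_wait(&msg);
	kpreempt_enable();
}
#endif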

/*
 * ipi_wait: spin-wait until the message is processed.
 */
void
ipi_wait(ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (msg->_pending) {
		KASSERT(msg->_pending < ncpu);
		SPINLOCK_BACKOFF(count);
	}
}