/*	$NetBSD: subr_ipi.c,v 1.3.18.2 2020/04/13 08:05:04 martin Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inter-processor interrupt (IPI) interface: asynchronous IPIs that
 * invoke a registered function with a constant argument, and
 * synchronous IPIs with cross-call support.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.3.18.2 2020/04/13 08:05:04 martin Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/ipi.h>
#include <sys/intr.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * An array of the IPI handlers used for asynchronous invocation.
 * The lock protects the slot allocation.
 */

typedef struct {
	ipi_func_t	func;
	void *		arg;
} ipi_intr_t;

static kmutex_t		ipi_mngmt_lock;
static ipi_intr_t	ipi_intrs[IPI_MAXREG]	__cacheline_aligned;

/*
 * Per-CPU mailbox for IPI messages: it is a single cache line storing
 * up to IPI_MSG_MAX messages, i.e. one message pointer per pointer-sized
 * slot in the line (e.g. a 64-byte cache line and 8-byte pointers give
 * eight slots).  This interface is built on top of the synchronous IPIs.
 */

#define	IPI_MSG_SLOTS	(CACHE_LINE_SIZE / sizeof(ipi_msg_t *))
#define	IPI_MSG_MAX	IPI_MSG_SLOTS

typedef struct {
	ipi_msg_t *	msg[IPI_MSG_SLOTS];
} ipi_mbox_t;


/* Mailboxes for the synchronous IPIs. */
static ipi_mbox_t *	ipi_mboxes	__read_mostly;
static struct evcnt	ipi_mboxfull_ev	__cacheline_aligned;
static void		ipi_msg_cpu_handler(void *);

/* ID of the handler for the synchronous IPIs - it must be zero. */
#define	IPI_SYNCH_ID	0

#ifndef MULTIPROCESSOR
#define	cpu_ipi(ci)	KASSERT(ci == NULL)
#endif

void
ipi_sysinit(void)
{
	const size_t len = ncpu * sizeof(ipi_mbox_t);

	/* Initialise the per-CPU bit fields. */
	for (u_int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);
		memset(&ci->ci_ipipend, 0, sizeof(ci->ci_ipipend));
	}
	mutex_init(&ipi_mngmt_lock, MUTEX_DEFAULT, IPL_NONE);
	memset(ipi_intrs, 0, sizeof(ipi_intrs));

	/* Allocate per-CPU IPI mailboxes. */
	ipi_mboxes = kmem_zalloc(len, KM_SLEEP);
	KASSERT(ipi_mboxes != NULL);

	/*
	 * Register the handler for synchronous IPIs.  This mechanism
	 * is built on top of the asynchronous interface.  Slot zero is
	 * reserved permanently; it is also handy to have zero mean
	 * failure for the other registrations (as that is potentially
	 * less error-prone).
	 */
	ipi_intrs[IPI_SYNCH_ID].func = ipi_msg_cpu_handler;

	evcnt_attach_dynamic(&ipi_mboxfull_ev, EVCNT_TYPE_MISC, NULL,
	   "ipi", "full");
}

/*
 * ipi_register: register an asynchronous IPI handler.
 *
 * => Returns an IPI ID greater than zero on success; zero on failure.
 */
u_int
ipi_register(ipi_func_t func, void *arg)
{
	mutex_enter(&ipi_mngmt_lock);
	for (u_int i = 0; i < IPI_MAXREG; i++) {
		if (ipi_intrs[i].func == NULL) {
			/* Register the function. */
			ipi_intrs[i].func = func;
			ipi_intrs[i].arg = arg;
			mutex_exit(&ipi_mngmt_lock);

			KASSERT(i != IPI_SYNCH_ID);
			return i;
		}
	}
	mutex_exit(&ipi_mngmt_lock);
	printf("WARNING: ipi_register: table full, increase IPI_MAXREG\n");
	return 0;
}
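
/*
 * Example (a hypothetical sketch, not code from this file): a driver
 * registering an asynchronous IPI handler at attach time.  The names
 * example_softc, example_ipi_intr and sc_ipi_id are illustrative only.
 *
 *	static void
 *	example_ipi_intr(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		atomic_inc_uint(&sc->sc_kicks);
 *	}
 *
 *	sc->sc_ipi_id = ipi_register(example_ipi_intr, sc);
 *	if (sc->sc_ipi_id == 0) {
 *		aprint_error("example: no free IPI slots\n");
 *		return;
 *	}
 *
 * The matching ipi_unregister(sc->sc_ipi_id) call would be made at
 * detach time, once no CPU can trigger this IPI any more.
 */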

/*
 * ipi_unregister: release the IPI handler given the ID.
 */
void
ipi_unregister(u_int ipi_id)
{
	ipi_msg_t ipimsg = { .func = __FPTRCAST(ipi_func_t, nullop) };

	KASSERT(ipi_id != IPI_SYNCH_ID);
	KASSERT(ipi_id < IPI_MAXREG);

	/* Release the slot. */
	mutex_enter(&ipi_mngmt_lock);
	KASSERT(ipi_intrs[ipi_id].func != NULL);
	ipi_intrs[ipi_id].func = NULL;

	/* Ensure that there are no IPIs in flight. */
	kpreempt_disable();
	ipi_broadcast(&ipimsg, false);
	ipi_wait(&ipimsg);
	kpreempt_enable();
	mutex_exit(&ipi_mngmt_lock);
}

/*
 * ipi_mark_pending: internal routine to mark an IPI pending on the
 * specified CPU (which might be curcpu()).
 */
static bool
ipi_mark_pending(u_int ipi_id, struct cpu_info *ci)
{
	const u_int i = ipi_id >> IPI_BITW_SHIFT;
	const uint32_t bitm = 1U << (ipi_id & IPI_BITW_MASK);

	KASSERT(ipi_id < IPI_MAXREG);
	KASSERT(kpreempt_disabled());

	/* Mark as pending; the caller sends the IPI if it was not already. */
	if (membar_consumer(), (ci->ci_ipipend[i] & bitm) == 0) {
		atomic_or_32(&ci->ci_ipipend[i], bitm);
		return true;
	}
	return false;
}

/*
 * ipi_trigger: asynchronously send an IPI to the specified CPU.
 *
 * => The target CPU must not be the local one.
 */
void
ipi_trigger(u_int ipi_id, struct cpu_info *ci)
{

	KASSERT(curcpu() != ci);
	if (ipi_mark_pending(ipi_id, ci)) {
		cpu_ipi(ci);
	}
}
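
/*
 * Example (hypothetical, continuing the registration sketch above):
 * kicking the handler on another CPU.  Preemption must stay disabled
 * across the call so that curcpu() remains stable and the remote-CPU
 * assertion holds; "ci" is the illustrative target CPU.
 *
 *	kpreempt_disable();
 *	if (ci != curcpu()) {
 *		ipi_trigger(sc->sc_ipi_id, ci);
 *	}
 *	kpreempt_enable();
 */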

/*
 * ipi_trigger_multi_internal: the guts of ipi_trigger_multi() and
 * ipi_trigger_broadcast().
 */
static void
ipi_trigger_multi_internal(u_int ipi_id, const kcpuset_t *target,
    bool skip_self)
{
	const cpuid_t selfid = cpu_index(curcpu());
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(kpreempt_disabled());
	KASSERT(target != NULL);

	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t cpuid = cpu_index(ci);

		if (!kcpuset_isset(target, cpuid) || cpuid == selfid) {
			continue;
		}
		ipi_trigger(ipi_id, ci);
	}
	if (!skip_self && kcpuset_isset(target, selfid)) {
		ipi_mark_pending(ipi_id, curcpu());
		int s = splhigh();
		ipi_cpu_handler();
		splx(s);
	}
}

/*
 * ipi_trigger_multi: same as ipi_trigger(), but sends to multiple CPUs
 * given the target CPU set.
 *
 * => The set may include the sending CPU, which then runs the handler
 *    locally at splhigh().
 */
void
ipi_trigger_multi(u_int ipi_id, const kcpuset_t *target)
{
	ipi_trigger_multi_internal(ipi_id, target, false);
}

/*
 * ipi_trigger_broadcast: same as ipi_trigger_multi() with
 * kcpuset_attached as the target, optionally skipping the sending CPU.
 */
void
ipi_trigger_broadcast(u_int ipi_id, bool skip_self)
{
	ipi_trigger_multi_internal(ipi_id, kcpuset_attached, skip_self);
}
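
/*
 * Example (hypothetical): sending the registered IPI to an explicit CPU
 * set.  The kcpuset handling is an illustrative sketch; error handling
 * is omitted.
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp, true);
 *	kcpuset_set(kcp, cpu_index(ci));
 *	kpreempt_disable();
 *	ipi_trigger_multi(sc->sc_ipi_id, kcp);
 *	kpreempt_enable();
 *	kcpuset_destroy(kcp);
 *
 * ipi_trigger_broadcast(sc->sc_ipi_id, true) would instead reach every
 * attached CPU except the sender.
 */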

/*
 * put_msg: insert message into the mailbox.
 */
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;
again:
	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		if (__predict_true(mbox->msg[i] == NULL) &&
		    atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
			return;
		}
	}

	/* All slots are full: we have to spin-wait. */
	ipi_mboxfull_ev.ev_count++;
	SPINLOCK_BACKOFF(count);
	goto again;
}

/*
 * ipi_cpu_handler: handle pending asynchronous IPIs on the current CPU.
 */
void
ipi_cpu_handler(void)
{
	struct cpu_info * const ci = curcpu();

	/*
	 * Handle asynchronous IPIs: inspect per-CPU bit field, extract
	 * IPI ID numbers and execute functions in those slots.
	 */
	for (u_int i = 0; i < IPI_BITWORDS; i++) {
		uint32_t pending, bit;

		if (ci->ci_ipipend[i] == 0) {
			continue;
		}
		pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_producer();
#endif
		while ((bit = ffs(pending)) != 0) {
			const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
			ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id];

			pending &= ~(1U << bit);
			KASSERT(ipi_hdl->func != NULL);
			ipi_hdl->func(ipi_hdl->arg);
		}
	}
}

/*
 * ipi_msg_cpu_handler: handle synchronous IPIs - iterate mailbox,
 * execute the passed functions and acknowledge the messages.
 */
static void
ipi_msg_cpu_handler(void *arg __unused)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = mbox->msg[i]) == NULL) {
			continue;
		}
		mbox->msg[i] = NULL;

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/* Ack the request. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_producer();
#endif
		atomic_dec_uint(&msg->_pending);
	}
}

/*
 * ipi_unicast: send an IPI message to a single CPU.
 *
 * => The target CPU must be remote, i.e. not the local one.
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
{
	const cpuid_t id = cpu_index(ci);

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	msg->_pending = 1;
	membar_producer();

	put_msg(&ipi_mboxes[id], msg);
	ipi_trigger(IPI_SYNCH_ID, ci);
}
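
/*
 * Example (a hypothetical sketch): a synchronous cross-call that runs
 * example_sync() on a remote CPU "ci" and waits for it to finish.  The
 * ipi_msg_t can live on the stack because ipi_wait() does not return
 * until the remote CPU has acknowledged the message; example_sync and
 * the "where" variable are illustrative.
 *
 *	static void
 *	example_sync(void *arg)
 *	{
 *		u_int *where = arg;
 *
 *		*where = cpu_index(curcpu());
 *	}
 *
 *	u_int where;
 *	ipi_msg_t msg = { .func = example_sync, .arg = &where };
 *
 *	kpreempt_disable();
 *	ipi_unicast(&msg, ci);
 *	ipi_wait(&msg);
 *	kpreempt_enable();
 */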

/*
 * ipi_multicast: send an IPI to each CPU in the specified set.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int local;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	local = !!kcpuset_isset(target, cpu_index(self));
	msg->_pending = kcpuset_countset(target) - local;
	membar_producer();

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		if (!kcpuset_isset(target, id)) {
			continue;
		}
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}
	if (local) {
		msg->func(msg->arg);
	}
}

/*
 * ipi_broadcast: send an IPI to all CPUs, optionally skipping the
 * sending CPU (which otherwise runs the handler locally).
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_broadcast(ipi_msg_t *msg, bool skip_self)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	msg->_pending = ncpu - 1;
	membar_producer();

	/* Broadcast IPIs for remote CPUs. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}

	if (!skip_self) {
		/* Finally, execute locally. */
		msg->func(msg->arg);
	}
}
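
/*
 * Example: ipi_unregister() above uses exactly this pattern as a
 * barrier - it broadcasts a no-op message and waits for every remote
 * CPU to acknowledge it, which guarantees that no invocation of the
 * handler being removed is still in flight:
 *
 *	ipi_msg_t ipimsg = { .func = __FPTRCAST(ipi_func_t, nullop) };
 *
 *	kpreempt_disable();
 *	ipi_broadcast(&ipimsg, false);
 *	ipi_wait(&ipimsg);
 *	kpreempt_enable();
 */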

/*
 * ipi_wait: spin-wait until the message is processed.
 */
void
ipi_wait(ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (msg->_pending) {
		KASSERT(msg->_pending < ncpu);
		SPINLOCK_BACKOFF(count);
	}
}