Home | History | Annotate | Line # | Download | only in kern
subr_ipi.c revision 1.1
      1  1.1  rmind /*	$NetBSD: subr_ipi.c,v 1.1 2014/05/19 22:47:54 rmind Exp $	*/
      2  1.1  rmind 
      3  1.1  rmind /*-
      4  1.1  rmind  * Copyright (c) 2014 The NetBSD Foundation, Inc.
      5  1.1  rmind  * All rights reserved.
      6  1.1  rmind  *
      7  1.1  rmind  * This code is derived from software contributed to The NetBSD Foundation
      8  1.1  rmind  * by Mindaugas Rasiukevicius.
      9  1.1  rmind  *
     10  1.1  rmind  * Redistribution and use in source and binary forms, with or without
     11  1.1  rmind  * modification, are permitted provided that the following conditions
     12  1.1  rmind  * are met:
     13  1.1  rmind  * 1. Redistributions of source code must retain the above copyright
     14  1.1  rmind  *    notice, this list of conditions and the following disclaimer.
     15  1.1  rmind  * 2. Redistributions in binary form must reproduce the above copyright
     16  1.1  rmind  *    notice, this list of conditions and the following disclaimer in the
     17  1.1  rmind  *    documentation and/or other materials provided with the distribution.
     18  1.1  rmind  *
     19  1.1  rmind  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  1.1  rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  1.1  rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  1.1  rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  1.1  rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  1.1  rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  1.1  rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  1.1  rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  1.1  rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  1.1  rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  1.1  rmind  * POSSIBILITY OF SUCH DAMAGE.
     30  1.1  rmind  */
     31  1.1  rmind 
     32  1.1  rmind /*
     33  1.1  rmind  * Inter-processor interrupt (IPI) interface with cross-call support.
     34  1.1  rmind  */
     35  1.1  rmind 
     36  1.1  rmind #include <sys/cdefs.h>
     37  1.1  rmind __KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.1 2014/05/19 22:47:54 rmind Exp $");
     38  1.1  rmind 
     39  1.1  rmind #include <sys/param.h>
     40  1.1  rmind #include <sys/types.h>
     41  1.1  rmind 
     42  1.1  rmind #include <sys/atomic.h>
     43  1.1  rmind #include <sys/evcnt.h>
     44  1.1  rmind #include <sys/cpu.h>
     45  1.1  rmind #include <sys/ipi.h>
     46  1.1  rmind #include <sys/kcpuset.h>
     47  1.1  rmind #include <sys/kmem.h>
     48  1.1  rmind #include <sys/lock.h>
     49  1.1  rmind 
     50  1.1  rmind /*
     51  1.1  rmind  * Per-CPU mailbox for IPI messages: it is a single cache line storing
     52  1.1  rmind  * up to IPI_MSG_MAX messages.
     53  1.1  rmind  */
     54  1.1  rmind 
     55  1.1  rmind #define	IPI_MSG_SLOTS	(CACHE_LINE_SIZE / sizeof(ipi_msg_t *))
     56  1.1  rmind #define	IPI_MSG_MAX	IPI_MSG_SLOTS
     57  1.1  rmind 
     58  1.1  rmind typedef struct {
     59  1.1  rmind 	ipi_msg_t *	msg[IPI_MSG_SLOTS];
     60  1.1  rmind } ipi_mbox_t;
     61  1.1  rmind 
     62  1.1  rmind static ipi_mbox_t *	ipi_mboxes	__read_mostly;
     63  1.1  rmind static struct evcnt	ipi_mboxfull_ev	__cacheline_aligned;
     64  1.1  rmind 
     65  1.1  rmind #ifndef MULTIPROCESSOR
     66  1.1  rmind #define	cpu_ipi(ci)	KASSERT(ci == NULL)
     67  1.1  rmind #endif
     68  1.1  rmind 
     69  1.1  rmind void
     70  1.1  rmind ipi_sysinit(void)
     71  1.1  rmind {
     72  1.1  rmind 	const size_t len = ncpu * sizeof(ipi_mbox_t);
     73  1.1  rmind 
     74  1.1  rmind 	/* Allocate per-CPU IPI mailboxes. */
     75  1.1  rmind 	ipi_mboxes = kmem_zalloc(len, KM_SLEEP);
     76  1.1  rmind 	KASSERT(ipi_mboxes != NULL);
     77  1.1  rmind 
     78  1.1  rmind 	evcnt_attach_dynamic(&ipi_mboxfull_ev, EVCNT_TYPE_MISC, NULL,
     79  1.1  rmind 	   "ipi", "full");
     80  1.1  rmind }
     81  1.1  rmind 
     82  1.1  rmind /*
     83  1.1  rmind  * put_msg: insert message into the mailbox.
     84  1.1  rmind  */
     85  1.1  rmind static inline void
     86  1.1  rmind put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
     87  1.1  rmind {
     88  1.1  rmind 	int count = SPINLOCK_BACKOFF_MIN;
     89  1.1  rmind again:
     90  1.1  rmind 	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
     91  1.1  rmind 		if (__predict_true(mbox->msg[i] == NULL) &&
     92  1.1  rmind 		    atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
     93  1.1  rmind 			return;
     94  1.1  rmind 		}
     95  1.1  rmind 	}
     96  1.1  rmind 
     97  1.1  rmind 	/* All slots are full: we have to spin-wait. */
     98  1.1  rmind 	ipi_mboxfull_ev.ev_count++;
     99  1.1  rmind 	SPINLOCK_BACKOFF(count);
    100  1.1  rmind 	goto again;
    101  1.1  rmind }
    102  1.1  rmind 
/*
 * ipi_cpu_handler: the IPI handler.
 *
 * Runs on the interrupted CPU: scans this CPU's mailbox, executes
 * each pending message's handler and acknowledges it so that the
 * sender's ipi_wait() can return.
 */
void
ipi_cpu_handler(void)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	KASSERT(curcpu() == ci);

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = mbox->msg[i]) == NULL) {
			continue;
		}
		/*
		 * A plain store is sufficient to release the slot: only
		 * this CPU ever clears its own slots, while remote senders
		 * only CAS a slot from NULL to non-NULL (see put_msg).
		 */
		mbox->msg[i] = NULL;

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/*
		 * Ack the request.
		 *
		 * NOTE(review): there is no explicit release barrier before
		 * this decrement -- verify that atomic_dec_uint orders the
		 * handler's stores before ipi_wait() can observe
		 * _pending == 0 on all supported platforms.
		 */
		atomic_dec_uint(&msg->_pending);
	}
}
    131  1.1  rmind 
    132  1.1  rmind /*
    133  1.1  rmind  * ipi_unicast: send an IPI to a single CPU.
    134  1.1  rmind  *
    135  1.1  rmind  * => The CPU must be remote; must not be local.
    136  1.1  rmind  * => The caller must ipi_wait() on the message for completion.
    137  1.1  rmind  */
    138  1.1  rmind void
    139  1.1  rmind ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
    140  1.1  rmind {
    141  1.1  rmind 	const cpuid_t id = cpu_index(ci);
    142  1.1  rmind 
    143  1.1  rmind 	KASSERT(msg->func != NULL);
    144  1.1  rmind 	KASSERT(kpreempt_disabled());
    145  1.1  rmind 	KASSERT(curcpu() != ci);
    146  1.1  rmind 
    147  1.1  rmind 	msg->_pending = 1;
    148  1.1  rmind 	membar_producer();
    149  1.1  rmind 
    150  1.1  rmind 	put_msg(&ipi_mboxes[id], msg);
    151  1.1  rmind 	cpu_ipi(ci);
    152  1.1  rmind }
    153  1.1  rmind 
    154  1.1  rmind /*
    155  1.1  rmind  * ipi_multicast: send an IPI to each CPU in the specified set.
    156  1.1  rmind  *
    157  1.1  rmind  * => The caller must ipi_wait() on the message for completion.
    158  1.1  rmind  */
    159  1.1  rmind void
    160  1.1  rmind ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
    161  1.1  rmind {
    162  1.1  rmind 	const struct cpu_info * const self = curcpu();
    163  1.1  rmind 	CPU_INFO_ITERATOR cii;
    164  1.1  rmind 	struct cpu_info *ci;
    165  1.1  rmind 	u_int local;
    166  1.1  rmind 
    167  1.1  rmind 	KASSERT(msg->func != NULL);
    168  1.1  rmind 	KASSERT(kpreempt_disabled());
    169  1.1  rmind 
    170  1.1  rmind 	local = !!kcpuset_isset(target, cpu_index(self));
    171  1.1  rmind 	msg->_pending = kcpuset_countset(target) - local;
    172  1.1  rmind 	membar_producer();
    173  1.1  rmind 
    174  1.1  rmind 	for (CPU_INFO_FOREACH(cii, ci)) {
    175  1.1  rmind 		cpuid_t id;
    176  1.1  rmind 
    177  1.1  rmind 		if (__predict_false(ci == self)) {
    178  1.1  rmind 			continue;
    179  1.1  rmind 		}
    180  1.1  rmind 		id = cpu_index(ci);
    181  1.1  rmind 		if (!kcpuset_isset(target, id)) {
    182  1.1  rmind 			continue;
    183  1.1  rmind 		}
    184  1.1  rmind 		put_msg(&ipi_mboxes[id], msg);
    185  1.1  rmind 		cpu_ipi(ci);
    186  1.1  rmind 	}
    187  1.1  rmind 	if (local) {
    188  1.1  rmind 		msg->func(msg->arg);
    189  1.1  rmind 	}
    190  1.1  rmind }
    191  1.1  rmind 
    192  1.1  rmind /*
    193  1.1  rmind  * ipi_broadcast: send an IPI to all CPUs.
    194  1.1  rmind  *
    195  1.1  rmind  * => The caller must ipi_wait() on the message for completion.
    196  1.1  rmind  */
    197  1.1  rmind void
    198  1.1  rmind ipi_broadcast(ipi_msg_t *msg)
    199  1.1  rmind {
    200  1.1  rmind 	const struct cpu_info * const self = curcpu();
    201  1.1  rmind 	CPU_INFO_ITERATOR cii;
    202  1.1  rmind 	struct cpu_info *ci;
    203  1.1  rmind 
    204  1.1  rmind 	KASSERT(msg->func != NULL);
    205  1.1  rmind 	KASSERT(kpreempt_disabled());
    206  1.1  rmind 
    207  1.1  rmind 	msg->_pending = ncpu - 1;
    208  1.1  rmind 	membar_producer();
    209  1.1  rmind 
    210  1.1  rmind 	/* Broadcast IPIs for remote CPUs. */
    211  1.1  rmind 	for (CPU_INFO_FOREACH(cii, ci)) {
    212  1.1  rmind 		cpuid_t id;
    213  1.1  rmind 
    214  1.1  rmind 		if (__predict_false(ci == self)) {
    215  1.1  rmind 			continue;
    216  1.1  rmind 		}
    217  1.1  rmind 		id = cpu_index(ci);
    218  1.1  rmind 		put_msg(&ipi_mboxes[id], msg);
    219  1.1  rmind 	}
    220  1.1  rmind 	cpu_ipi(NULL);
    221  1.1  rmind 
    222  1.1  rmind 	/* Finally, execute locally. */
    223  1.1  rmind 	msg->func(msg->arg);
    224  1.1  rmind }
    225  1.1  rmind 
/*
 * ipi_wait: spin-wait until the message is processed.
 *
 * Busy-waits (with exponential backoff) until every targeted remote
 * CPU has executed and acknowledged the message, i.e. _pending has
 * been decremented to zero by the handlers.
 *
 * NOTE(review): there is no acquire barrier after _pending is seen
 * to reach zero -- confirm callers do not depend on observing the
 * remote handlers' side effects without further ordering.
 */
void
ipi_wait(ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (msg->_pending) {
		/* Sanity: _pending counts remote CPUs only. */
		KASSERT(msg->_pending < ncpu);
		SPINLOCK_BACKOFF(count);
	}
}
    239