/*	$NetBSD: subr_xcall.c,v 1.1.2.3 2007/10/09 13:44:29 ad Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cross call support
 *
 * Background
 *
 *	Sometimes it is necessary to modify hardware state that is tied
 *	directly to individual CPUs (such as a CPU's local timer), and
 *	these updates cannot be done remotely by another CPU.  The LWP
 *	requesting the update may be unable to guarantee that it will be
 *	running on the CPU where the update must occur, when the update
 *	occurs.
 *
 *	Additionally, it's sometimes necessary to modify per-CPU software
 *	state from a remote CPU.  Where these update operations are so
 *	rare or the access to the per-CPU data so frequent that the cost
 *	of using locking or atomic operations to provide coherency is
 *	prohibitive, another way must be found.
 *
 *	Cross calls help to solve these types of problems by allowing
 *	any CPU in the system to request that an arbitrary function be
 *	executed on any other CPU.
 *
 * Implementation
 *
 *	A slow mechanism for making 'low priority' cross calls is
 *	provided.  The function to be executed runs on the remote CPU
 *	within a bound kthread.  No queueing is provided, and the
 *	implementation uses global state.  The function being called may
 *	block briefly on locks, but in doing so must be careful not to
 *	interfere with other cross calls in the system.  The function is
 *	called in thread context and not from a soft interrupt, so it
 *	can ensure that it is not interrupting other code running on the
 *	CPU, and so has exclusive access to the CPU.  Since this facility
 *	is heavyweight, it's expected that it will not be used often.
 *
 * Future directions
 *
 *	Add a low-overhead mechanism to run cross calls in interrupt
 *	context (XC_HIGHPRI).
 */
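
/*
 * Example
 *
 *	A minimal usage sketch (illustrative only; example_func and the
 *	'sc' cookie are hypothetical, not part of this file).  A caller
 *	that needs a function run on every CPU issues the call, then
 *	waits on the returned ticket:
 *
 *		static void
 *		example_func(void *arg1, void *arg2)
 *		{
 *			example_update_local_state(arg1);
 *		}
 *
 *		uint64_t where;
 *
 *		where = xc_broadcast(0, example_func, sc, NULL);
 *		xc_wait(where);
 *
 *	xc_unicast() works the same way, but takes a target cpu_info
 *	pointer and runs the function on that CPU only.
 */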

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.1.2.3 2007/10/09 13:44:29 ad Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/xcall.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/evcnt.h>
#include <sys/kthread.h>
#include <sys/cpu.h>

#define	PRI_XCALL	0

static void	xc_thread(void *);
static uint64_t	xc_lowpri(u_int, xcfunc_t, void *, void *, struct cpu_info *);

static kmutex_t		xc_lock;
static xcfunc_t		xc_func;
static void		*xc_arg1;
static void		*xc_arg2;
static kcondvar_t	xc_busy;
static struct evcnt	xc_unicast_ev;
static struct evcnt	xc_broadcast_ev;
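/*
 * xc_headp counts call slots issued (one per target CPU), xc_tailp
 * counts slots picked up by the per-CPU threads, and xc_donep counts
 * slots whose function has completed.  The value returned by
 * xc_lowpri() is the ticket that xc_wait() compares against xc_donep.
 */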
static uint64_t		xc_headp;
static uint64_t		xc_tailp;
static uint64_t		xc_donep;

/*
 * xc_init_cpu:
 *
 *	Initialize the cross-call subsystem.  Called once for each CPU
 *	in the system as they are attached.
 */
void
xc_init_cpu(struct cpu_info *ci)
{
	static bool again;
	int error;

	if (!again) {
		/* Autoconfiguration will prevent re-entry. */
		again = true;
		mutex_init(&xc_lock, MUTEX_DEFAULT, IPL_NONE);
		cv_init(&xc_busy, "xcallbsy");
		evcnt_attach_dynamic(&xc_unicast_ev, EVCNT_TYPE_MISC, NULL,
		   "crosscall", "unicast");
		evcnt_attach_dynamic(&xc_broadcast_ev, EVCNT_TYPE_MISC, NULL,
		   "crosscall", "broadcast");
	}

	cv_init(&ci->ci_data.cpu_xcall, "xcall");
	error = kthread_create(PRI_XCALL, KTHREAD_MPSAFE, ci, xc_thread,
	    NULL, NULL, "xcall/%d", (int)ci->ci_cpuid);
	if (error != 0)
		panic("xc_init_cpu: error %d", error);
}

/*
 * xc_broadcast:
 *
 *	Trigger a call on all CPUs in the system.
 */
uint64_t
xc_broadcast(u_int flags, xcfunc_t func, void *arg1, void *arg2)
{

	if ((flags & XC_HIGHPRI) != 0) {
		panic("xc_broadcast: no high priority crosscalls yet");
	} else {
		return xc_lowpri(flags, func, arg1, arg2, NULL);
	}
}

/*
 * xc_unicast:
 *
 *	Trigger a call on one CPU.
 */
uint64_t
xc_unicast(u_int flags, xcfunc_t func, void *arg1, void *arg2,
	   struct cpu_info *ci)
{

	if ((flags & XC_HIGHPRI) != 0) {
		panic("xc_unicast: no high priority crosscalls yet");
	} else {
		KASSERT(ci != NULL);
		return xc_lowpri(flags, func, arg1, arg2, ci);
	}
}

/*
 * xc_lowpri:
 *
 *	Trigger a low priority call on one or more CPUs.
 */
static uint64_t
xc_lowpri(u_int flags, xcfunc_t func, void *arg1, void *arg2,
	  struct cpu_info *ci)
{
	CPU_INFO_ITERATOR cii;
	uint64_t where;

	mutex_enter(&xc_lock);
	while (xc_headp != xc_tailp)
		cv_wait(&xc_busy, &xc_lock);
	xc_arg1 = arg1;
	xc_arg2 = arg2;
	xc_func = func;
	if (ci == NULL) {
		xc_broadcast_ev.ev_count++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			xc_headp += 1;
			ci->ci_data.cpu_xcall_pending = true;
			cv_signal(&ci->ci_data.cpu_xcall);
		}
	} else {
		xc_unicast_ev.ev_count++;
		xc_headp += 1;
		ci->ci_data.cpu_xcall_pending = true;
		cv_signal(&ci->ci_data.cpu_xcall);
	}
	KASSERT(xc_tailp < xc_headp);
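	/* Return a ticket; xc_wait() sleeps until xc_donep reaches it. */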
	where = xc_headp;
	mutex_exit(&xc_lock);

	return where;
}

/*
 * xc_wait:
 *
 *	Wait for a cross call to complete.
 */
void
xc_wait(uint64_t where)
{

	if (xc_donep >= where)
		return;

	mutex_enter(&xc_lock);
	while (xc_donep < where)
		cv_wait(&xc_busy, &xc_lock);
	mutex_exit(&xc_lock);
}

/*
 * xc_thread:
 *
 *	One thread per CPU to dispatch low priority calls.
 */
static void
xc_thread(void *cookie)
{
	void *arg1, *arg2;
	struct cpu_info *ci;
	xcfunc_t func;

	ci = curcpu();

	mutex_enter(&xc_lock);
	for (;;) {
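		/*
		 * Sleep until a call is posted to this CPU.  Once the
		 * queue has drained, wake threads sleeping in xc_lowpri()
		 * or xc_wait() so they can re-check their conditions.
		 */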
		while (!ci->ci_data.cpu_xcall_pending) {
			if (xc_headp == xc_tailp)
				cv_broadcast(&xc_busy);
			cv_wait(&ci->ci_data.cpu_xcall, &xc_lock);
			KASSERT(ci == curcpu());
		}
		ci->ci_data.cpu_xcall_pending = false;
		func = xc_func;
		arg1 = xc_arg1;
		arg2 = xc_arg2;
		xc_tailp++;
		mutex_exit(&xc_lock);

		(*func)(arg1, arg2);

		mutex_enter(&xc_lock);
		xc_donep++;
	}
	/* NOTREACHED */
}