/*	$NetBSD: subr_xcall.c,v 1.34.18.1 2024/09/11 10:09:19 martin Exp $	*/

/*-
 * Copyright (c) 2007-2010, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cross call support
 *
 * Background
 *
 *	Sometimes it is necessary to modify hardware state that is tied
 *	directly to individual CPUs (such as a CPU's local timer), and
 *	these updates can not be done remotely by another CPU.  The LWP
 *	requesting the update may be unable to guarantee that it will be
 *	running on the CPU where the update must occur, when the update
 *	occurs.
 *
 *	Additionally, it's sometimes necessary to modify per-CPU software
 *	state from a remote CPU.  Where these update operations are so
 *	rare or the access to the per-CPU data so frequent that the cost
 *	of using locking or atomic operations to provide coherency is
 *	prohibitive, another way must be found.
 *
 *	Cross calls help to solve these types of problem by allowing
 *	any LWP in the system to request that an arbitrary function be
 *	executed on a specific CPU.
 *
 * Implementation
 *
 *	A slow mechanism for making low priority cross calls is
 *	provided.  The function to be executed runs on the remote CPU
 *	within a bound kthread.  No queueing is provided, and the
 *	implementation uses global state.  The function being called may
 *	block briefly on locks, but in doing so must be careful to not
 *	interfere with other cross calls in the system.  The function is
 *	called with thread context and not from a soft interrupt, so it
 *	can ensure that it is not interrupting other code running on the
 *	CPU, and so has exclusive access to the CPU.  Since this facility
 *	is heavyweight, it's expected that it will not be used often.
 *
 *	Cross calls must not allocate memory, as the pagedaemon uses cross
 *	calls (and memory allocation may need to wait on the pagedaemon).
 *
 *	A low-overhead mechanism for high priority calls (XC_HIGHPRI) is
 *	also provided.  The function to be executed runs in software
 *	interrupt context at IPL_SOFTSERIAL level, and is expected to
 *	be very lightweight, e.g. avoid blocking.
 */
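
/*
 * Usage sketch (illustrative only; xc_example() is a hypothetical
 * callback, not part of this file):
 *
 *	static void
 *	xc_example(void *arg1, void *arg2)
 *	{
 *		... runs once on each CPU, in thread context ...
 *	}
 *
 *	uint64_t where = xc_broadcast(0, xc_example, NULL, NULL);
 *	xc_wait(where);		<- blocks until every CPU has run it
 */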

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.34.18.1 2024/09/11 10:09:19 martin Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/xcall.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/evcnt.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#ifdef _RUMPKERNEL
#include "rump_private.h"
#endif

/* Cross-call state box. */
typedef struct {
	kmutex_t	xc_lock;
	kcondvar_t	xc_busy;
	xcfunc_t	xc_func;
	void *		xc_arg1;
	void *		xc_arg2;
	uint64_t	xc_headp;
	uint64_t	xc_donep;
	unsigned int	xc_ipl;
} xc_state_t;
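
/*
 * Note on the ticket scheme (as implemented below): xc_headp counts
 * requests issued and xc_donep counts requests completed.  A caller's
 * "where" ticket is the value of xc_headp after its request has been
 * queued, and the call is complete once xc_donep catches up to it.
 */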

/* Bit indicating high (1) or low (0) priority. */
#define	XC_PRI_BIT		(1ULL << 63)

/* Low priority xcall structures. */
static xc_state_t	xc_low_pri	__cacheline_aligned;

/* High priority xcall structures. */
static xc_state_t	xc_high_pri	__cacheline_aligned;
static void *		xc_sihs[4]	__cacheline_aligned;

/* Event counters. */
static struct evcnt	xc_unicast_ev	__cacheline_aligned;
static struct evcnt	xc_broadcast_ev	__cacheline_aligned;

static void		xc_init(void);
static void		xc_thread(void *);

static inline uint64_t	xc_highpri(xcfunc_t, void *, void *, struct cpu_info *,
			    unsigned int);
static inline uint64_t	xc_lowpri(xcfunc_t, void *, void *, struct cpu_info *);

/* The internal form of IPL */
#define	XC_IPL_MASK		0xff00
/*
 * Assign 0 to XC_IPL_SOFTSERIAL to treat IPL_SOFTSERIAL as the default value
 * (just XC_HIGHPRI).
 */
#define	XC_IPL_SOFTSERIAL	0
#define	XC_IPL_SOFTNET		1
#define	XC_IPL_SOFTBIO		2
#define	XC_IPL_SOFTCLOCK	3
#define	XC_IPL_MAX		XC_IPL_SOFTCLOCK

CTASSERT(XC_IPL_MAX <= __arraycount(xc_sihs));

/*
 * xc_init:
 *
 *	Initialize low and high priority cross-call structures.
 */
static void
xc_init(void)
{
	xc_state_t *xclo = &xc_low_pri, *xchi = &xc_high_pri;

	memset(xclo, 0, sizeof(xc_state_t));
	mutex_init(&xclo->xc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&xclo->xc_busy, "xclocv");

	memset(xchi, 0, sizeof(xc_state_t));
	mutex_init(&xchi->xc_lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
	cv_init(&xchi->xc_busy, "xchicv");

	/* Set up a softint for each IPL_SOFT*. */
#define SETUP_SOFTINT(xipl, sipl) do {					\
		xc_sihs[(xipl)] = softint_establish((sipl) | SOFTINT_MPSAFE,\
		    xc__highpri_intr, NULL);				\
		KASSERT(xc_sihs[(xipl)] != NULL);			\
	} while (0)

	SETUP_SOFTINT(XC_IPL_SOFTSERIAL, SOFTINT_SERIAL);
	/*
	 * If an IPL_SOFT* level has the same value as the previous one,
	 * we don't use that IPL (see xc_encode_ipl), so we don't need to
	 * allocate a softint for it.
	 */
#if IPL_SOFTNET != IPL_SOFTSERIAL
	SETUP_SOFTINT(XC_IPL_SOFTNET, SOFTINT_NET);
#endif
#if IPL_SOFTBIO != IPL_SOFTNET
	SETUP_SOFTINT(XC_IPL_SOFTBIO, SOFTINT_BIO);
#endif
#if IPL_SOFTCLOCK != IPL_SOFTBIO
	SETUP_SOFTINT(XC_IPL_SOFTCLOCK, SOFTINT_CLOCK);
#endif

#undef SETUP_SOFTINT

	evcnt_attach_dynamic(&xc_unicast_ev, EVCNT_TYPE_MISC, NULL,
	    "crosscall", "unicast");
	evcnt_attach_dynamic(&xc_broadcast_ev, EVCNT_TYPE_MISC, NULL,
	    "crosscall", "broadcast");
}

/*
 * Encode an IPL to a form that can be embedded into flags of xc_broadcast
 * or xc_unicast.
 */
unsigned int
xc_encode_ipl(int ipl)
{

	switch (ipl) {
	case IPL_SOFTSERIAL:
		return __SHIFTIN(XC_IPL_SOFTSERIAL, XC_IPL_MASK);
	/* IPL_SOFT* can be the same value (e.g., on sparc or mips). */
#if IPL_SOFTNET != IPL_SOFTSERIAL
	case IPL_SOFTNET:
		return __SHIFTIN(XC_IPL_SOFTNET, XC_IPL_MASK);
#endif
#if IPL_SOFTBIO != IPL_SOFTNET
	case IPL_SOFTBIO:
		return __SHIFTIN(XC_IPL_SOFTBIO, XC_IPL_MASK);
#endif
#if IPL_SOFTCLOCK != IPL_SOFTBIO
	case IPL_SOFTCLOCK:
		return __SHIFTIN(XC_IPL_SOFTCLOCK, XC_IPL_MASK);
#endif
	}

	panic("Invalid IPL: %d", ipl);
}
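
/*
 * For illustration (not a call made in this file): a caller that wants
 * its high priority handler to run at IPL_SOFTNET would combine the
 * encoded IPL with XC_HIGHPRI, e.g.
 *
 *	where = xc_broadcast(XC_HIGHPRI | xc_encode_ipl(IPL_SOFTNET),
 *	    func, arg1, arg2);
 */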

/*
 * Extract an XC_IPL from flags of xc_broadcast or xc_unicast.
 */
static inline unsigned int
xc_extract_ipl(unsigned int flags)
{

	return __SHIFTOUT(flags, XC_IPL_MASK);
}

/*
 * xc_init_cpu:
 *
 *	Initialize the cross-call subsystem.  Called once for each CPU
 *	in the system as they are attached.
 */
void
xc_init_cpu(struct cpu_info *ci)
{
	static bool again = false;
	int error __diagused;

	if (!again) {
		/* Autoconfiguration will prevent re-entry. */
		xc_init();
		again = true;
	}
	cv_init(&ci->ci_data.cpu_xcall, "xcall");
	error = kthread_create(PRI_XCALL, KTHREAD_MPSAFE, ci, xc_thread,
	    NULL, NULL, "xcall/%u", ci->ci_index);
	KASSERT(error == 0);
}

/*
 * xc_broadcast:
 *
 *	Trigger a call on all CPUs in the system.
 */
uint64_t
xc_broadcast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2)
{

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		(*func)(arg1, arg2);
		return 0;
	}

	if ((flags & XC_HIGHPRI) != 0) {
		int ipl = xc_extract_ipl(flags);
		return xc_highpri(func, arg1, arg2, NULL, ipl);
	} else {
		return xc_lowpri(func, arg1, arg2, NULL);
	}
}

static void
xc_nop(void *arg1, void *arg2)
{

	return;
}

/*
 * xc_barrier:
 *
 *	Broadcast a nop to all CPUs in the system.
 */
void
xc_barrier(unsigned int flags)
{
	uint64_t where;

	where = xc_broadcast(flags, xc_nop, NULL, NULL);
	xc_wait(where);
}
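
/*
 * A typical use of xc_barrier() (outside this file) is as a quiescence
 * point: since the nop must run on every CPU before xc_wait() returns,
 * the caller knows that whatever each CPU was running beforehand (e.g.
 * a lock-free read section) has finished by the time the barrier ends.
 */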

/*
 * xc_unicast:
 *
 *	Trigger a call on one CPU.
 */
uint64_t
xc_unicast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2,
    struct cpu_info *ci)
{
	int s;

	KASSERT(ci != NULL);
	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		KASSERT(ci == curcpu());
		s = splsoftserial();
		(*func)(arg1, arg2);
		splx(s);
		return 0;
	}

	if ((flags & XC_HIGHPRI) != 0) {
		int ipl = xc_extract_ipl(flags);
		return xc_highpri(func, arg1, arg2, ci, ipl);
	} else {
		return xc_lowpri(func, arg1, arg2, ci);
	}
}

/*
 * xc_wait:
 *
 *	Wait for a cross call to complete.
 */
void
xc_wait(uint64_t where)
{
	xc_state_t *xc;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
	ASSERT_SLEEPABLE();

	if (__predict_false(!mp_online)) {
		return;
	}

	/* Determine whether it is high or low priority cross-call. */
	if ((where & XC_PRI_BIT) != 0) {
		xc = &xc_high_pri;
		where &= ~XC_PRI_BIT;
	} else {
		xc = &xc_low_pri;
	}

#ifdef __HAVE_ATOMIC64_LOADSTORE
	/* Fast path, if already done. */
	if (atomic_load_acquire(&xc->xc_donep) >= where) {
		return;
	}
#endif
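
	/*
	 * On platforms with __HAVE_ATOMIC64_LOADSTORE, the acquire load
	 * above pairs with the release store of xc_donep in xc_thread()
	 * and xc__highpri_intr(), so a caller that observes its ticket
	 * as done also observes the cross call's side effects.  Without
	 * it, xc_lock provides the ordering.
	 */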

	/* Slow path: block until awoken. */
	mutex_enter(&xc->xc_lock);
	while (xc->xc_donep < where) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	mutex_exit(&xc->xc_lock);
}

/*
 * xc_lowpri:
 *
 *	Trigger a low priority call on one or more CPUs.
 */
static inline uint64_t
xc_lowpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci)
{
	xc_state_t *xc = &xc_low_pri;
	CPU_INFO_ITERATOR cii;
	uint64_t where;

	mutex_enter(&xc->xc_lock);
	while (xc->xc_headp != xc->xc_donep) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	xc->xc_arg1 = arg1;
	xc->xc_arg2 = arg2;
	xc->xc_func = func;
	if (ci == NULL) {
		xc_broadcast_ev.ev_count++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_RUNNING) == 0)
				continue;
			xc->xc_headp += 1;
			ci->ci_data.cpu_xcall_pending = true;
			cv_signal(&ci->ci_data.cpu_xcall);
		}
	} else {
		xc_unicast_ev.ev_count++;
		xc->xc_headp += 1;
		ci->ci_data.cpu_xcall_pending = true;
		cv_signal(&ci->ci_data.cpu_xcall);
	}
	KASSERT(xc->xc_donep < xc->xc_headp);
	where = xc->xc_headp;
	mutex_exit(&xc->xc_lock);

	/* Return a low priority ticket. */
	KASSERT((where & XC_PRI_BIT) == 0);
	return where;
}

/*
 * xc_thread:
 *
 *	One thread per-CPU to dispatch low priority calls.
 */
static void
xc_thread(void *cookie)
{
	struct cpu_info *ci = curcpu();
	xc_state_t *xc = &xc_low_pri;
	void *arg1, *arg2;
	xcfunc_t func;
	struct lwp *l = curlwp;

	KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d",
	    l, l->l_nopreempt);

	mutex_enter(&xc->xc_lock);
	for (;;) {
		while (!ci->ci_data.cpu_xcall_pending) {
			if (xc->xc_headp == xc->xc_donep) {
				cv_broadcast(&xc->xc_busy);
			}
			cv_wait(&ci->ci_data.cpu_xcall, &xc->xc_lock);
			KASSERT(ci == curcpu());
		}
		ci->ci_data.cpu_xcall_pending = false;
		func = xc->xc_func;
		arg1 = xc->xc_arg1;
		arg2 = xc->xc_arg2;
		mutex_exit(&xc->xc_lock);

		KASSERT(func != NULL);
		(*func)(arg1, arg2);

		KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d func %p",
		    l, l->l_nopreempt, func);

		mutex_enter(&xc->xc_lock);
#ifdef __HAVE_ATOMIC64_LOADSTORE
		atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
#else
		xc->xc_donep++;
#endif
	}
	/* NOTREACHED */
}

/*
 * xc_ipi_handler:
 *
 *	Handler of cross-call IPI.
 */
void
xc_ipi_handler(void)
{
	xc_state_t *xc = &xc_high_pri;

	KASSERT(xc->xc_ipl < __arraycount(xc_sihs));
	KASSERT(xc_sihs[xc->xc_ipl] != NULL);

	/* Executes xc__highpri_intr() via software interrupt. */
	softint_schedule(xc_sihs[xc->xc_ipl]);
}

/*
 * xc__highpri_intr:
 *
 *	A software interrupt handler for high priority calls.
 */
void
xc__highpri_intr(void *dummy)
{
	xc_state_t *xc = &xc_high_pri;
	void *arg1, *arg2;
	xcfunc_t func;

	KASSERTMSG(!cpu_intr_p(), "high priority xcall for function %p",
	    xc->xc_func);
	/*
	 * Lock-less fetch of function and its arguments.
	 * Safe since it cannot change at this point.
	 */
	func = xc->xc_func;
	arg1 = xc->xc_arg1;
	arg2 = xc->xc_arg2;

	KASSERT(func != NULL);
	(*func)(arg1, arg2);

	/*
	 * Note the request as done, and if we have reached the head,
	 * cross-call has been processed - notify waiters, if any.
	 */
	mutex_enter(&xc->xc_lock);
	KASSERT(xc->xc_donep < xc->xc_headp);
#ifdef __HAVE_ATOMIC64_LOADSTORE
	atomic_store_release(&xc->xc_donep, xc->xc_donep + 1);
#else
	xc->xc_donep++;
#endif
	if (xc->xc_donep == xc->xc_headp) {
		cv_broadcast(&xc->xc_busy);
	}
	mutex_exit(&xc->xc_lock);
}

/*
 * xc_highpri:
 *
 *	Trigger a high priority call on one or more CPUs.
 */
static inline uint64_t
xc_highpri(xcfunc_t func, void *arg1, void *arg2, struct cpu_info *ci,
    unsigned int ipl)
{
	xc_state_t *xc = &xc_high_pri;
	uint64_t where;

	mutex_enter(&xc->xc_lock);
	while (xc->xc_headp != xc->xc_donep) {
		cv_wait(&xc->xc_busy, &xc->xc_lock);
	}
	xc->xc_func = func;
	xc->xc_arg1 = arg1;
	xc->xc_arg2 = arg2;
	xc->xc_headp += (ci ? 1 : ncpu);
	xc->xc_ipl = ipl;
	where = xc->xc_headp;
	mutex_exit(&xc->xc_lock);

	/*
	 * Send the IPI once lock is released.
	 * Note: it will handle the local CPU case.
	 */

#ifdef _RUMPKERNEL
	rump_xc_highpri(ci);
#else
#ifdef MULTIPROCESSOR
	kpreempt_disable();
	if (curcpu() == ci) {
		/* Unicast: local CPU. */
		xc_ipi_handler();
	} else if (ci) {
		/* Unicast: remote CPU. */
		xc_send_ipi(ci);
	} else {
		/* Broadcast: all, including local. */
		xc_send_ipi(NULL);
		xc_ipi_handler();
	}
	kpreempt_enable();
#else
	KASSERT(ci == NULL || curcpu() == ci);
	xc_ipi_handler();
#endif
#endif

	/* Indicate a high priority ticket. */
	return (where | XC_PRI_BIT);
}