/* $NetBSD: interrupt.c,v 1.100 2021/11/10 16:53:28 msaitoh Exp $ */

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Keith Bostic, Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
 * Redistribute and modify at will, leaving only this additional copyright
 * notice.
 */

#include "opt_multiprocessor.h"

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.100 2021/11/10 16:53:28 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/intr.h>
#include <sys/device.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <machine/cpuvar.h>
#include <machine/autoconf.h>
#include <machine/reg.h>
#include <machine/rpb.h>
#include <machine/frame.h>
#include <machine/cpuconf.h>
#include <machine/alpha.h>

/* Protected by cpu_lock */
struct scbvec scb_iovectab[SCB_VECTOIDX(SCB_SIZE - SCB_IOVECBASE)]
							__read_mostly;

static void	scb_stray(void *, u_long);

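/*
 * scb_init:
 *
 *	Initialize the I/O portion of the System Control Block vector
 *	table, pointing every vector at the stray interrupt handler.
 */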
void
scb_init(void)
{
	u_long i;

	for (i = 0; i < SCB_NIOVECS; i++) {
		scb_iovectab[i].scb_func = scb_stray;
		scb_iovectab[i].scb_arg = NULL;
	}
}

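/*
 * scb_stray:
 *
 *	Default handler for vectors with no registered handler.
 */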
static void
scb_stray(void *arg, u_long vec)
{

	printf("WARNING: stray interrupt, vector 0x%lx\n", vec);
}

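/*
 * scb_set:
 *
 *	Claim a specific SCB vector and register a handler for it.
 */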
void
scb_set(u_long vec, void (*func)(void *, u_long), void *arg)
{
	u_long idx;

	KASSERT(mutex_owned(&cpu_lock));

	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
	    (vec & (SCB_VECSIZE - 1)) != 0)
		panic("scb_set: bad vector 0x%lx", vec);

	idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);

	if (scb_iovectab[idx].scb_func != scb_stray)
		panic("scb_set: vector 0x%lx already occupied", vec);

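	/*
	 * Store the argument before the function pointer, with a
	 * memory barrier between the two stores, so that an interrupt
	 * taken on another CPU never sees the new handler paired with
	 * a stale argument.
	 */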
	scb_iovectab[idx].scb_arg = arg;
	alpha_mb();
	scb_iovectab[idx].scb_func = func;
	alpha_mb();
}

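/*
 * scb_alloc:
 *
 *	Allocate a free SCB vector for the given handler, returning
 *	its vector number, or SCB_ALLOC_FAILED if none are available.
 */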
u_long
scb_alloc(void (*func)(void *, u_long), void *arg)
{
	u_long vec, idx;

	KASSERT(mutex_owned(&cpu_lock));

	/*
	 * Allocate "downwards", to avoid bumping into
	 * interrupts which are likely to be at the lower
	 * vector numbers.
	 */
	for (vec = SCB_SIZE - SCB_VECSIZE;
	     vec >= SCB_IOVECBASE; vec -= SCB_VECSIZE) {
		idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);
		if (scb_iovectab[idx].scb_func == scb_stray) {
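			/* Publish scb_arg before scb_func; see scb_set(). */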
			scb_iovectab[idx].scb_arg = arg;
			alpha_mb();
			scb_iovectab[idx].scb_func = func;
			alpha_mb();
			return (vec);
		}
	}

	return (SCB_ALLOC_FAILED);
}

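/*
 * scb_free:
 *
 *	Release a previously claimed SCB vector, pointing it back at
 *	the stray interrupt handler.
 */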
void
scb_free(u_long vec)
{
	u_long idx;

	KASSERT(mutex_owned(&cpu_lock));

	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
	    (vec & (SCB_VECSIZE - 1)) != 0)
		panic("scb_free: bad vector 0x%lx", vec);

	idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);

	if (scb_iovectab[idx].scb_func == scb_stray)
		panic("scb_free: vector 0x%lx is empty", vec);

	scb_iovectab[idx].scb_func = scb_stray;
	alpha_mb();
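	/*
	 * Stash the vector number in scb_arg; the stray handler
	 * ignores its argument, so this presumably serves only as a
	 * debugging aid.
	 */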
	scb_iovectab[idx].scb_arg = (void *) vec;
	alpha_mb();
}

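/*
 * interrupt:
 *
 *	Main interrupt dispatcher, called from the interrupt vector
 *	entry point.  a0 is the interrupt type, a1 the SCB vector
 *	(for device interrupts), and a2 an additional parameter.
 */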
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	struct cpu_info *ci = curcpu();
	struct cpu_softc *sc = ci->ci_softc;

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
		ci->ci_intrdepth++;

		alpha_ipi_process(ci, framep);

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();

		ci->ci_intrdepth--;
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;

	case ALPHA_INTR_CLOCK:	/* clock interrupt */
		/*
		 * Rather than simply increment the interrupt depth
		 * for the clock interrupt, we add 0x10.  Why?  Because
		 * while we only call out a single device interrupt
		 * level, technically the architecture specification
		 * supports two, meaning we could have intrdepth > 1
		 * just for device interrupts.
		 *
		 * Adding 0x10 here means that cpu_intr_p() can check
		 * for "intrdepth != 0" for "in interrupt context" and
		 * CLKF_INTR() can check "(intrdepth & 0xf) != 0" for
		 * "was processing interrupts when the clock interrupt
		 * happened".
		 */
		ci->ci_intrdepth += 0x10;
		sc->sc_evcnt_clock.ev_count++;
		ci->ci_data.cpu_nintr++;
		if (platform.clockintr) {
			/*
			 * Call hardclock().  This will also call
			 * statclock(). On the primary CPU, it
			 * will also deal with time-of-day stuff.
			 */
			(*platform.clockintr)((struct clockframe *)framep);

#if defined(MULTIPROCESSOR)
			if (alpha_use_cctr) {
				cc_hardclock(ci);
			}
#endif /* MULTIPROCESSOR */

			/*
			 * If it's time to call the scheduler clock,
			 * do so.
			 */
			if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 &&
			    schedhz != 0)
				schedclock(ci->ci_curlwp);
		}
		ci->ci_intrdepth -= 0x10;
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		ci->ci_intrdepth++;
		a0 = alpha_pal_rdmces();
		if (platform.mcheck_handler != NULL &&
		    (void *)framep->tf_regs[FRAME_PC] != XentArith)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		ci->ci_intrdepth--;
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		const int idx = SCB_VECTOIDX(a1 - SCB_IOVECBASE);

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

		atomic_inc_ulong(&sc->sc_evcnt_device.ev_count);
		ci->ci_intrdepth++;

		ci->ci_data.cpu_nintr++;

		struct scbvec * const scb = &scb_iovectab[idx];
		(*scb->scb_func)(scb->scb_arg, a1);

		ci->ci_intrdepth--;
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}

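/*
 * machine_check:
 *
 *	Default handler for machine checks and correctable errors.
 *	Expected machine checks (see badaddr_read() below) are simply
 *	recorded; anything unexpected and uncorrectable is fatal.
 */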
void
machine_check(unsigned long mces, struct trapframe *framep,
    unsigned long vector, unsigned long param)
{
	const char *type;
	struct mchkinfo *mcp;
	static struct timeval ratelimit[1];

	mcp = &curcpu()->ci_mcinfo;
	/* Make sure it's an error we know about. */
	if ((mces & (ALPHA_MCES_MIP|ALPHA_MCES_SCE|ALPHA_MCES_PCE)) == 0) {
		type = "fatal machine check or error (unknown type)";
		goto fatal;
	}

	/* Machine checks. */
	if (mces & ALPHA_MCES_MIP) {
		/* If we weren't expecting it, then we punt. */
		if (!mcp->mc_expected) {
			type = "unexpected machine check";
			goto fatal;
		}
		mcp->mc_expected = 0;
		mcp->mc_received = 1;
	}

	/* System correctable errors. */
	if (mces & ALPHA_MCES_SCE)
		printf("Warning: received system correctable error.\n");

	/* Processor correctable errors. */
	if (mces & ALPHA_MCES_PCE)
		printf("Warning: received processor correctable error.\n");

	/* Clear pending machine checks and correctable errors */
	alpha_pal_wrmces(mces);
	return;

fatal:
	alpha_pal_wrmces(mces);
	if ((void *)framep->tf_regs[FRAME_PC] == XentArith) {
		rlprintf(ratelimit, "Stray machine check\n");
		return;
	}

	printf("\n");
	printf("%s:\n", type);
	printf("\n");
	printf("    mces    = 0x%lx\n", mces);
	printf("    vector  = 0x%lx\n", vector);
	printf("    param   = 0x%lx\n", param);
	printf("    pc      = 0x%lx\n", framep->tf_regs[FRAME_PC]);
	printf("    ra      = 0x%lx\n", framep->tf_regs[FRAME_RA]);
	printf("    code    = 0x%lx\n", *(unsigned long *)(param + 0x10));
	printf("    curlwp  = %p\n", curlwp);
	if (curlwp != NULL)
		printf("        pid = %d.%d, comm = %s\n",
		    curproc->p_pid, curlwp->l_lid,
		    curproc->p_comm);
	printf("\n");
	panic("machine check");
}

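/*
 * badaddr:
 *
 *	Return non-zero if referencing the given address causes a
 *	machine check, i.e. if nothing responds there.  A hypothetical
 *	bus front-end might probe for a device register like so:
 *
 *		if (badaddr(regaddr, sizeof(uint32_t)))
 *			return (0);	(no device present)
 */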
int
badaddr(void *addr, size_t size)
{

	return (badaddr_read(addr, size, NULL));
}

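/*
 * badaddr_read:
 *
 *	As badaddr(), but optionally returns the datum read via rptr
 *	when the access did not machine check.  Works by telling the
 *	machine check handler to expect a check, performing the read,
 *	and then seeing whether a check was actually delivered.
 */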
int
badaddr_read(void *addr, size_t size, void *rptr)
{
	lwp_t * const l = curlwp;
	KPREEMPT_DISABLE(l);

	struct mchkinfo *mcp = &curcpu()->ci_mcinfo;
	long rcpt;
	int rv;

	/* Get rid of any stale machine checks that have been waiting.  */
	alpha_pal_draina();

	/* Tell the trap code to expect a machine check. */
	mcp->mc_received = 0;
	mcp->mc_expected = 1;

	/* Read from the test address, and make sure the read happens. */
	alpha_mb();
	switch (size) {
	case sizeof (uint8_t):
		rcpt = *(volatile uint8_t *)addr;
		break;

	case sizeof (uint16_t):
		rcpt = *(volatile uint16_t *)addr;
		break;

	case sizeof (uint32_t):
		rcpt = *(volatile uint32_t *)addr;
		break;

	case sizeof (uint64_t):
		rcpt = *(volatile uint64_t *)addr;
		break;

	default:
		panic("badaddr: invalid size (%zu)", size);
	}
	alpha_mb();
	alpha_mb();	/* MAGIC ON SOME SYSTEMS */

	/* Make sure we took the machine check, if we caused one. */
	alpha_pal_draina();

	/* disallow further machine checks */
	mcp->mc_expected = 0;

	rv = mcp->mc_received;
	mcp->mc_received = 0;

	/*
	 * And copy back read results (if no fault occurred).
	 */
	if (rptr && rv == 0) {
		switch (size) {
		case sizeof (uint8_t):
			*(volatile uint8_t *)rptr = rcpt;
			break;

		case sizeof (uint16_t):
			*(volatile uint16_t *)rptr = rcpt;
			break;

		case sizeof (uint32_t):
			*(volatile uint32_t *)rptr = rcpt;
			break;

		case sizeof (uint64_t):
			*(volatile uint64_t *)rptr = rcpt;
			break;
		}
	}

	KPREEMPT_ENABLE(l);

	/* Return non-zero (i.e. true) if it's a bad address. */
	return (rv);
}

/*
 * Fast soft interrupt support.
 */

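/*
 * SOFTINT_TO_IPL() maps a software interrupt level (SOFTINT_*) to the
 * processor IPL at which it is dispatched; SOFTINTS_ELIGIBLE() computes
 * the set of softint bits that may run once the IPL drops to the given
 * level.
 */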
#define	SOFTINT_TO_IPL(si)						\
	(ALPHA_PSL_IPL_SOFT_LO + ((ALPHA_IPL2_SOFTINTS >> (si)) & 1))

#define	SOFTINTS_ELIGIBLE(ipl)						\
	((ALPHA_ALL_SOFTINTS << ((ipl) << 1)) & ALPHA_ALL_SOFTINTS)

/* Validate some assumptions the code makes. */
__CTASSERT(SOFTINT_TO_IPL(SOFTINT_CLOCK) == ALPHA_PSL_IPL_SOFT_LO);
__CTASSERT(SOFTINT_TO_IPL(SOFTINT_BIO) == ALPHA_PSL_IPL_SOFT_LO);
__CTASSERT(SOFTINT_TO_IPL(SOFTINT_NET) == ALPHA_PSL_IPL_SOFT_HI);
__CTASSERT(SOFTINT_TO_IPL(SOFTINT_SERIAL) == ALPHA_PSL_IPL_SOFT_HI);

__CTASSERT(IPL_SOFTCLOCK == ALPHA_PSL_IPL_SOFT_LO);
__CTASSERT(IPL_SOFTBIO == ALPHA_PSL_IPL_SOFT_LO);
__CTASSERT(IPL_SOFTNET == ALPHA_PSL_IPL_SOFT_HI);
__CTASSERT(IPL_SOFTSERIAL == ALPHA_PSL_IPL_SOFT_HI);

__CTASSERT(SOFTINT_CLOCK_MASK & 0x3);
__CTASSERT(SOFTINT_BIO_MASK & 0x3);
__CTASSERT(SOFTINT_NET_MASK & 0xc);
__CTASSERT(SOFTINT_SERIAL_MASK & 0xc);
__CTASSERT(SOFTINT_COUNT == 4);

__CTASSERT((ALPHA_ALL_SOFTINTS & ~0xfUL) == 0);
__CTASSERT(SOFTINTS_ELIGIBLE(IPL_NONE) == ALPHA_ALL_SOFTINTS);
__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTCLOCK) == ALPHA_IPL2_SOFTINTS);
__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTBIO) == ALPHA_IPL2_SOFTINTS);
__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTNET) == 0);
__CTASSERT(SOFTINTS_ELIGIBLE(IPL_SOFTSERIAL) == 0);

/*
 * softint_trigger:
 *
 *	Trigger a soft interrupt.
 */
void
softint_trigger(uintptr_t const machdep)
{
	/* No need for an atomic; called at splhigh(). */
	KASSERT(alpha_pal_rdps() == ALPHA_PSL_IPL_HIGH);
	curcpu()->ci_ssir |= machdep;
}

/*
 * softint_init_md:
 *
 *	Machine-dependent initialization for a fast soft interrupt thread.
 */
void
softint_init_md(lwp_t * const l, u_int const level, uintptr_t * const machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_silwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;

	const uintptr_t si_bit = __BIT(level);
	KASSERT(si_bit & ALPHA_ALL_SOFTINTS);
	*machdep = si_bit;
}

/*
 * Helper macro.
 *
 * Dispatch a softint and then restart the loop so that higher
 * priority softints are always done first.
 */
#define	DOSOFTINT(level)						\
	if (ssir & SOFTINT_##level##_MASK) {				\
		ci->ci_ssir &= ~SOFTINT_##level##_MASK;			\
		alpha_softint_switchto(l, IPL_SOFT##level,		\
		    ci->ci_silwps[SOFTINT_##level]);			\
		KASSERT(alpha_pal_rdps() == ALPHA_PSL_IPL_HIGH);	\
		continue;						\
	}

/*
 * alpha_softint_dispatch:
 *
 *	Process pending soft interrupts that are eligible to run
 *	at the specified new IPL.  Must be called at splhigh().
 */
void
alpha_softint_dispatch(int const ipl)
{
	struct lwp * const l = curlwp;
	struct cpu_info * const ci = l->l_cpu;
	unsigned long ssir;
	const unsigned long eligible = SOFTINTS_ELIGIBLE(ipl);

	KASSERT(alpha_pal_rdps() == ALPHA_PSL_IPL_HIGH);

	for (;;) {
		ssir = ci->ci_ssir & eligible;
		if (ssir == 0)
			break;

		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
	}
}

/*
 * spllower:
 *
 *	Lower interrupt priority.  May need to check for software
 *	interrupts.
 */
void
spllower(int const ipl)
{

	if (ipl < ALPHA_PSL_IPL_SOFT_HI && curcpu()->ci_ssir) {
		(void) alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
		alpha_softint_dispatch(ipl);
	}
	(void) alpha_pal_swpipl(ipl);
}

/*
 * cpu_intr_p:
 *
 *	Return non-zero if executing in interrupt context.
 */
bool
cpu_intr_p(void)
{

	return curcpu()->ci_intrdepth != 0;
}

void	(*alpha_intr_redistribute)(void);

/*
 * cpu_intr_redistribute:
 *
 *	Redistribute interrupts amongst CPUs eligible to handle them.
 */
void
cpu_intr_redistribute(void)
{
	if (alpha_intr_redistribute != NULL)
		(*alpha_intr_redistribute)();
}

/*
 * cpu_intr_count:
 *
 *	Return the number of device interrupts this CPU handles.
 */
unsigned int
cpu_intr_count(struct cpu_info * const ci)
{
	return ci->ci_nintrhand;
}

/*
 * Rate-limited printf for security-sensitive messages.
 */
void
rlprintf(struct timeval *t, const char *fmt, ...)
{
	va_list ap;
	static const struct timeval msgperiod[1] = {{ 5, 0 }};

	if (!ratecheck(t, msgperiod))
		return;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
    639