/*	$NetBSD: intr.c,v 1.8 2022/09/29 06:39:59 skrll Exp $	*/
/*	$OpenBSD: intr.c,v 1.27 2009/12/31 12:52:35 jsing Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt handling for NetBSD/hppa.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.8 2022/09/29 06:39:59 skrll Exp $");

#define __MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>
#include <machine/cpufunc.h>
#include <machine/intr.h>
#include <machine/reg.h>

#include <hppa/hppa/machdep.h>

#include <machine/mutex.h>

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

static int hppa_intr_ipl_next(struct cpu_info *);
void hppa_intr_calculatemasks(struct cpu_info *);
int hppa_intr_ipending(struct hppa_interrupt_register *, int);
void hppa_intr_dispatch(int, int, struct trapframe *);

/* The list of all interrupt registers. */
struct hppa_interrupt_register *hppa_interrupt_registers[HPPA_INTERRUPT_BITS];


/*
 * This establishes a new interrupt register.
 */
void
hppa_interrupt_register_establish(struct cpu_info *ci,
    struct hppa_interrupt_register *ir)
{
	int idx;

	/* Initialize the register structure. */
	memset(ir, 0, sizeof(*ir));
	ir->ir_ci = ci;

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		ir->ir_bits_map[idx] = IR_BIT_UNUSED;

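	/* All interrupt bits start out available for allocation. */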
	ir->ir_bits = ~0;
	/* Add this structure to the list. */
	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		if (hppa_interrupt_registers[idx] == NULL)
			break;
	if (idx == HPPA_INTERRUPT_BITS)
		panic("%s: too many regs", __func__);
	hppa_interrupt_registers[idx] = ir;
}

/*
 * This initialises interrupts for a CPU.
 */
void
hppa_intr_initialise(struct cpu_info *ci)
{
	int i;

	/* Initialize all priority level masks to mask everything. */
	for (i = 0; i < NIPL; i++)
		ci->ci_imask[i] = -1;

	/* We are now at the highest priority level. */
	ci->ci_cpl = -1;

	/* There are no pending interrupts. */
	ci->ci_ipending = 0;

	/* We are not running an interrupt handler. */
	ci->ci_intr_depth = 0;

	/* There are no interrupt handlers. */
	memset(ci->ci_ib, 0, sizeof(ci->ci_ib));

	/* There are no interrupt registers. */
	memset(hppa_interrupt_registers, 0, sizeof(hppa_interrupt_registers));
}

/*
 * This establishes a new interrupt handler.
 */
void *
hppa_intr_establish(int ipl, int (*handler)(void *), void *arg,
    struct hppa_interrupt_register *ir, int bit_pos)
{
	struct hppa_interrupt_bit *ib;
	struct cpu_info *ci = ir->ir_ci;
	int idx;

	/* Panic on a bad interrupt bit. */
	if (bit_pos < 0 || bit_pos >= HPPA_INTERRUPT_BITS)
		panic("%s: bad interrupt bit %d", __func__, bit_pos);

	/*
	 * Panic if this interrupt bit is already handled, but allow
	 * shared interrupts for cascaded registers, e.g. dino and gsc.
	 * XXX This could be improved.
	 */
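	/*
	 * Note that ir_bits_map[] is indexed MSB-first: slot (31 ^ bit_pos)
	 * describes interrupt bit bit_pos of this register.
	 */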
	if (handler != NULL) {
		if (IR_BIT_USED_P(ir->ir_bits_map[31 ^ bit_pos]))
			panic("%s: interrupt already handled", __func__);
	}

	/*
	 * If this interrupt bit leads us to another interrupt register,
	 * simply note that in the mapping for the bit.
	 */
	if (handler == NULL) {
		for (idx = 1; idx < HPPA_INTERRUPT_BITS; idx++)
			if (hppa_interrupt_registers[idx] == arg)
				break;
		if (idx == HPPA_INTERRUPT_BITS)
			panic("%s: unknown int reg", __func__);

		ir->ir_bits_map[31 ^ bit_pos] = IR_BIT_REG(idx);

		return NULL;
	}

	/*
	 * Otherwise, allocate a new bit in the spl.
	 */
	idx = hppa_intr_ipl_next(ir->ir_ci);

	ir->ir_bits &= ~(1 << bit_pos);
	ir->ir_rbits &= ~(1 << bit_pos);
	if (!IR_BIT_USED_P(ir->ir_bits_map[31 ^ bit_pos])) {
		ir->ir_bits_map[31 ^ bit_pos] = 1 << idx;
	} else {
		int j;

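		/*
		 * Record in ci_ishared that this register now carries a
		 * shared bit; hppa_intr() re-polls such registers after
		 * dispatching.
		 */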
		ir->ir_bits_map[31 ^ bit_pos] |= 1 << idx;
		j = (ir - hppa_interrupt_registers[0]);
		ci->ci_ishared |= (1 << j);
	}
	ib = &ci->ci_ib[idx];

	/* Fill this interrupt bit. */
	ib->ib_reg = ir;
	ib->ib_ipl = ipl;
	ib->ib_spl = (1 << idx);
	snprintf(ib->ib_name, sizeof(ib->ib_name), "irq %d", bit_pos);

	evcnt_attach_dynamic(&ib->ib_evcnt, EVCNT_TYPE_INTR, NULL, ir->ir_name,
	     ib->ib_name);
	ib->ib_handler = handler;
	ib->ib_arg = arg;

	hppa_intr_calculatemasks(ci);

	return ib;
}

/*
 * This allocates an interrupt bit within an interrupt register.
 * It returns the bit position, or -1 if no bits were available.
 */
int
hppa_intr_allocate_bit(struct hppa_interrupt_register *ir, int irq)
{
	int bit_pos;
	int last_bit;
	u_int mask;
	int *bits;

	if (irq == -1) {
		bit_pos = 31;
		last_bit = 0;
		bits = &ir->ir_bits;
	} else {
		bit_pos = irq;
		last_bit = irq;
		bits = &ir->ir_rbits;
	}
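	/*
	 * Scan from bit_pos down to last_bit for a still-available bit:
	 * for irq == -1 this picks the highest free floating bit, otherwise
	 * it tests only the requested bit.
	 */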
	for (mask = (1 << bit_pos); bit_pos >= last_bit; bit_pos--) {
		if (*bits & mask)
			break;
		mask >>= 1;
	}
	if (bit_pos >= last_bit) {
		*bits &= ~mask;
		return bit_pos;
	}

	return -1;
}

/*
 * This returns the next available spl bit.
 */
static int
hppa_intr_ipl_next(struct cpu_info *ci)
{
	int idx;

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		if (ci->ci_ib[idx].ib_reg == NULL)
			break;
	if (idx == HPPA_INTERRUPT_BITS)
		panic("%s: too many devices", __func__);
	return idx;
}

/*
 * This computes the per-IPL interrupt masks and loads the hardware
 * mask registers, finally enabling interrupts.
 */
void
hppa_intr_calculatemasks(struct cpu_info *ci)
{
	struct hppa_interrupt_bit *ib;
	struct hppa_interrupt_register *ir;
	int idx, bit_pos;
	int mask;
	int ipl;

	/*
	 * Put together the initial imask for each level.
	 */
	memset(ci->ci_imask, 0, sizeof(ci->ci_imask));
	for (bit_pos = 0; bit_pos < HPPA_INTERRUPT_BITS; bit_pos++) {
		ib = &ci->ci_ib[bit_pos];
		if (ib->ib_reg == NULL)
			continue;
		ci->ci_imask[ib->ib_ipl] |= ib->ib_spl;
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	ci->ci_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	for (ipl = NIPL - 1; ipl > 0; ipl--)
		ci->ci_imask[ipl - 1] |= ci->ci_imask[ipl];

	/*
	 * Load all mask registers, loading %eiem last.  This will finally
	 * enable interrupts, but since cpl and ipending should be -1 and 0,
	 * respectively, no interrupts will get dispatched until the priority
	 * level is lowered.
	 */
	KASSERT(ci->ci_cpl == -1);
	KASSERT(ci->ci_ipending == 0);

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++) {
		ir = hppa_interrupt_registers[idx];
		if (ir == NULL || ir->ir_ci != ci)
			continue;
		mask = 0;
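		/* Unmask every bit that has a handler or a cascaded register. */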
		for (bit_pos = 0; bit_pos < HPPA_INTERRUPT_BITS; bit_pos++) {
			if (!IR_BIT_UNUSED_P(ir->ir_bits_map[31 ^ bit_pos]))
				mask |= (1 << bit_pos);
		}
		if (ir->ir_iscpu)
			ir->ir_ci->ci_eiem = mask;
		else if (ir->ir_mask != NULL)
			*ir->ir_mask = mask;
	}
}

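/*
 * Enable interrupt delivery on this CPU: load its %eiem and set PSW_I
 * so that external interrupts can be taken.
 */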
void
hppa_intr_enable(void)
{
	struct cpu_info *ci = curcpu();

	mtctl(ci->ci_eiem, CR_EIEM);
	ci->ci_psw |= PSW_I;
	hppa_enable_irq();
}


/*
 * Service interrupts.  This doesn't necessarily dispatch them.  This is
 * called with %eiem loaded with zero.  It's named hppa_intr because
 * trap.c calls it.
 */
void
hppa_intr(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	int eirr;
	int i;

#ifndef LOCKDEBUG
	extern char mutex_enter_crit_start[];
	extern char mutex_enter_crit_end[];

#ifndef	MULTIPROCESSOR
	extern char _lock_cas_ras_start[];
	extern char _lock_cas_ras_end[];

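	/*
	 * If we interrupted the _lock_cas() restartable atomic sequence,
	 * roll the PC back to its start so that the compare-and-swap is
	 * rerun in full when we return.
	 */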
	if (frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    frame->tf_iioq_head > (u_int)_lock_cas_ras_start &&
	    frame->tf_iioq_head < (u_int)_lock_cas_ras_end) {
		frame->tf_iioq_head = (u_int)_lock_cas_ras_start;
		frame->tf_iioq_tail = (u_int)_lock_cas_ras_start + 4;
	}
#endif

	/*
	 * If we interrupted in the middle of mutex_enter(), we must patch up
	 * the lock owner value quickly if we got the interlock.  If any of the
	 * interrupt handlers need to acquire the mutex, they could deadlock if
	 * the owner value is left unset.
	 */
	if (frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    frame->tf_iioq_head > (u_int)mutex_enter_crit_start &&
	    frame->tf_iioq_head < (u_int)mutex_enter_crit_end &&
	    frame->tf_ret0 != 0)
		((kmutex_t *)frame->tf_arg0)->mtx_owner = (uintptr_t)curlwp;
#endif

	/*
	 * Read the CPU interrupt register and acknowledge all interrupts.
	 * Starting with this value, get our set of new pending interrupts and
	 * add these new bits to ipending.
	 */
	mfctl(CR_EIRR, eirr);
	mtctl(eirr, CR_EIRR);

	ci->ci_ipending |= hppa_intr_ipending(&ci->ci_ir, eirr);

	i = 0;
	/* If we have interrupts to dispatch, do so. */
	while (ci->ci_ipending & ~ci->ci_cpl) {
		int shared;

		hppa_intr_dispatch(ci->ci_cpl, frame->tf_eiem, frame);

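		/*
		 * Re-poll every register that carries a shared bit;
		 * dispatching may have cleared some of its sources while
		 * others on the same line are still asserted.
		 */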
		shared = ci->ci_ishared;
		while (shared) {
			struct hppa_interrupt_register *sir;
			int sbit, lvl;

			sbit = ffs(shared) - 1;
			sir = hppa_interrupt_registers[sbit];
			lvl = *sir->ir_level;

			ci->ci_ipending |= hppa_intr_ipending(sir, lvl);
			shared &= ~(1 << sbit);
		}
		i++;
		KASSERTMSG(i <= 2,
		    "%s: ci->ci_ipending %08x ci->ci_cpl %08x shared %08x\n",
		    __func__, ci->ci_ipending, ci->ci_cpl, shared);
	}
}

/*
 * Dispatch interrupts.  This dispatches at least one interrupt.
 * This is called with %eiem loaded with zero.
 */
void
hppa_intr_dispatch(int ncpl, int eiem, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct hppa_interrupt_bit *ib;
	struct clockframe clkframe;
	int ipending_run;
	int bit_pos;
	void *arg;
	int handled __unused;
	bool locked = false;

	/*
	 * Increment our depth.
	 */
	ci->ci_intr_depth++;

	/* Loop while we have interrupts to dispatch. */
	for (;;) {

		/* Read ipending and mask it with ncpl. */
		ipending_run = (ci->ci_ipending & ~ncpl);
		if (ipending_run == 0)
			break;

		/* Choose one of the resulting bits to dispatch. */
		bit_pos = ffs(ipending_run) - 1;

		/*
		 * If this interrupt handler takes the clockframe
		 * as an argument, conjure one up.
		 */
		ib = &ci->ci_ib[bit_pos];
		ib->ib_evcnt.ev_count++;
		arg = ib->ib_arg;
		if (arg == NULL) {
			clkframe.cf_flags = (ci->ci_intr_depth > 1 ?
			    TFF_INTR : 0);
			clkframe.cf_spl = ncpl;
			if (frame != NULL) {
				clkframe.cf_flags |= frame->tf_flags;
				clkframe.cf_pc = frame->tf_iioq_head;
			}
			arg = &clkframe;
		}

		/*
		 * Remove this bit from ipending, raise spl to
		 * the level required to run this interrupt,
		 * and reenable interrupts.
		 */
		ci->ci_ipending &= ~(1 << bit_pos);
		ci->ci_cpl = ncpl | ci->ci_imask[ib->ib_ipl];
		mtctl(eiem, CR_EIEM);

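		/*
		 * IPL_VM handlers are not MP-safe, so run them holding
		 * the big kernel lock.
		 */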
		if (ib->ib_ipl == IPL_VM) {
			KERNEL_LOCK(1, NULL);
			locked = true;
		}

		/* Count and dispatch the interrupt. */
		ci->ci_data.cpu_nintr++;
		handled = (*ib->ib_handler)(arg);
#if 0
		if (!handled)
			printf("%s: can't handle interrupt\n",
				ib->ib_evcnt.ev_name);
#endif
		if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}

		/* Disable interrupts and loop. */
		mtctl(0, CR_EIEM);
	}

	/* Interrupts are disabled again, restore cpl and the depth. */
	ci->ci_cpl = ncpl;
	ci->ci_intr_depth--;
}


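/*
 * Compute the set of pending spl bits for the interrupts asserted in
 * eirr on this register, recursing into cascaded interrupt registers.
 */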
int
hppa_intr_ipending(struct hppa_interrupt_register *ir, int eirr)
{
	int pending = 0;
	int idx;

	for (idx = 31; idx >= 0; idx--) {
		if ((eirr & (1 << idx)) == 0)
			continue;
		if (IR_BIT_NESTED_P(ir->ir_bits_map[31 ^ idx])) {
			struct hppa_interrupt_register *nir;
			int reg = ir->ir_bits_map[31 ^ idx] & ~IR_BIT_MASK;

			nir = hppa_interrupt_registers[reg];
			pending |= hppa_intr_ipending(nir, *(nir->ir_req));
		} else {
			pending |= ir->ir_bits_map[31 ^ idx];
		}
	}

	return pending;
}

bool
cpu_intr_p(void)
{
	struct cpu_info *ci = curcpu();

#ifdef __HAVE_FAST_SOFTINTS
#error this should not count fast soft interrupts
#else
	return ci->ci_intr_depth != 0;
#endif
}
    518