/* $NetBSD: pci_machdep.c,v 1.33 2021/09/16 20:17:46 andvar Exp $ */

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Machine-specific functions for PCI autoconfiguration.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.33 2021/09/16 20:17:46 andvar Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/cpu.h>

#include <dev/isa/isavar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include "vga_pci.h"
#if NVGA_PCI
#include <dev/ic/mc6845reg.h>
#include <dev/ic/pcdisplayvar.h>
#include <dev/pci/vga_pcivar.h>
#endif

#include "tga.h"
#if NTGA
#include <dev/pci/tgavar.h>
#endif

#include <machine/rpb.h>

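/*
 * Locate the display device selected as the console at the given
 * bus/device/function and attach the matching console driver
 * (VGA or TGA, whichever reports the better match).
 */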
void
pci_display_console(bus_space_tag_t iot, bus_space_tag_t memt, pci_chipset_tag_t pc, int bus, int device, int function)
{
#if NVGA_PCI || NTGA
	pcitag_t tag;
	pcireg_t id;
	int match, nmatch;
#endif
#if NVGA_PCI
	pcireg_t class;
#endif
	int (*fn)(bus_space_tag_t, bus_space_tag_t, pci_chipset_tag_t,
	    int, int, int);

#if NVGA_PCI || NTGA
	tag = pci_make_tag(pc, bus, device, function);
	id = pci_conf_read(pc, tag, PCI_ID_REG);
	if (id == 0 || id == 0xffffffff)
		panic("pci_display_console: no device at %d/%d/%d",
		    bus, device, function);
#  if NVGA_PCI
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);
#  endif

	match = 0;
#endif
	fn = NULL;

#if NVGA_PCI
	nmatch = DEVICE_IS_VGA_PCI(class, id);
	if (nmatch > match) {
		match = nmatch;
		fn = vga_pci_cnattach;
	}
#endif
#if NTGA
	nmatch = DEVICE_IS_TGA(class, id);
	if (nmatch > match)
		nmatch = tga_cnmatch(iot, memt, pc, tag);
	if (nmatch > match) {
		match = nmatch;
		fn = tga_cnattach;
	}
#endif

	if (fn != NULL)
		(*fn)(iot, memt, pc, bus, device, function);
	else
		panic("pci_display_console: unconfigured device at %d/%d/%d",
		    bus, device, function);
}

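/*
 * Device registration hook for PCI devices: mark a PCI display device
 * as the console when the firmware CTB reports a graphics console
 * terminal.
 */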
void
device_pci_register(device_t dev, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct ctb *ctb;
	prop_dictionary_t dict;

	/* set properties for PCI framebuffers */
	ctb = (struct ctb *)(((char *)hwrpb) + hwrpb->rpb_ctb_off);
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    ctb->ctb_term_type == CTB_GRAPHICS) {
		/* XXX should consider multiple displays? */
		dict = device_properties(dev);
		prop_dictionary_set_bool(dict, "is_console", true);
	}
}

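/*
 * Select and initialize the PCI interrupt back-end for this system
 * type from the alpha_pci_intr_impls link set.
 */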
void
alpha_pci_intr_init(void *core, bus_space_tag_t iot, bus_space_tag_t memt,
    pci_chipset_tag_t pc)
{
	__link_set_decl(alpha_pci_intr_impls, struct alpha_pci_intr_impl);
	struct alpha_pci_intr_impl * const *impl;

	__link_set_foreach(impl, alpha_pci_intr_impls) {
		if ((*impl)->systype == cputype) {
			(*impl)->intr_init(core, iot, memt, pc);
			return;
		}
	}
	panic("%s: unknown systype %d", __func__, cputype);
}

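/*
 * Allocate the shared-interrupt state for each IRQ the chipset
 * provides and attach an event counter to each one.
 */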
void
alpha_pci_intr_alloc(pci_chipset_tag_t pc, unsigned int maxstrays)
{
	unsigned int i;
	struct evcnt *ev;
	const char *cp;

	pc->pc_shared_intrs = alpha_shared_intr_alloc(pc->pc_nirq);

	for (i = 0; i < pc->pc_nirq; i++) {
		alpha_shared_intr_set_maxstrays(pc->pc_shared_intrs, i,
		    maxstrays);
		alpha_shared_intr_set_private(pc->pc_shared_intrs, i,
		    pc->pc_intr_v);

		ev = alpha_shared_intr_evcnt(pc->pc_shared_intrs, i);
		cp = alpha_shared_intr_string(pc->pc_shared_intrs, i);

		evcnt_attach_dynamic(ev, EVCNT_TYPE_INTR, NULL,
		    pc->pc_intr_desc, cp);
	}
}

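/*
 * Generic interrupt mapping: the console firmware has already stored
 * the routing in the "line" register, so validate it and wrap it in a
 * pci_intr_handle_t.  Returns non-zero on failure.
 */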
int
alpha_pci_generic_intr_map(const struct pci_attach_args * const pa,
    pci_intr_handle_t * const ihp)
{
	pcitag_t const bustag = pa->pa_intrtag;
	int const buspin = pa->pa_intrpin;
	int const line = pa->pa_intrline;
	pci_chipset_tag_t const pc = pa->pa_pc;
	int bus, device, function;

	if (buspin == 0) {
		/* No IRQ used. */
		return 1;
	}
	if (buspin < 0 || buspin > 4) {
		printf("%s: bad interrupt pin %d\n", __func__, buspin);
		return 1;
	}

	pci_decompose_tag(pc, bustag, &bus, &device, &function);

	/*
	 * The console firmware places the interrupt mapping in the "line"
	 * value.  A value of (char)-1 indicates there is no mapping.
	 */
	if (line == 0xff) {
		printf("%s: no mapping for %d/%d/%d\n", __func__,
		    bus, device, function);
		return 1;
	}

	if (line < 0 || line >= pc->pc_nirq) {
		printf("%s: bad line %d for %d/%d/%d\n", __func__,
		    line, bus, device, function);
		return 1;
	}

	alpha_pci_intr_handle_init(ihp, line, 0);
	return 0;
}

const char *
alpha_pci_generic_intr_string(pci_chipset_tag_t const pc,
    pci_intr_handle_t const ih, char * const buf, size_t const len)
{
	const u_int irq = alpha_pci_intr_handle_get_irq(&ih);

	KASSERT(irq < pc->pc_nirq);

	snprintf(buf, len, "%s irq %u", pc->pc_intr_desc, irq);
	return buf;
}

const struct evcnt *
alpha_pci_generic_intr_evcnt(pci_chipset_tag_t const pc,
    pci_intr_handle_t const ih)
{
	const u_int irq = alpha_pci_intr_handle_get_irq(&ih);

	KASSERT(irq < pc->pc_nirq);

	return alpha_shared_intr_evcnt(pc->pc_shared_intrs, irq);
}

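/*
 * Choose a CPU to service an interrupt: prefer the CPU already
 * assigned (if it is still eligible), otherwise the eligible CPU with
 * the fewest handlers, falling back to the primary CPU.
 */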
static struct cpu_info *
alpha_pci_generic_intr_select_cpu(pci_chipset_tag_t const pc, u_int const irq,
    u_int const flags)
{
	struct cpu_info *ci, *best_ci;
	CPU_INFO_ITERATOR cii;

	KASSERT(mutex_owned(&cpu_lock));

	/*
	 * If the back-end didn't tell us where we can route, then
	 * they all go to the primary CPU.
	 */
	if (pc->pc_eligible_cpus == 0) {
		return &cpu_info_primary;
	}

	/*
	 * If the interrupt already has a CPU assigned, keep on using it,
	 * unless the CPU has become ineligible.
	 */
	ci = alpha_shared_intr_get_cpu(pc->pc_shared_intrs, irq);
	if (ci != NULL) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0 ||
		    CPU_IS_PRIMARY(ci)) {
			return ci;
		}
	}

	/*
	 * Pick the CPU with the fewest handlers.
	 */
	best_ci = NULL;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((pc->pc_eligible_cpus & __BIT(ci->ci_cpuid)) == 0) {
			/* This CPU is not eligible in hardware. */
			continue;
		}
		if (ci->ci_schedstate.spc_flags & SPCF_NOINTR) {
			/* This CPU is not eligible in software. */
			continue;
		}
		if (best_ci == NULL ||
		    ci->ci_nintrhand < best_ci->ci_nintrhand) {
			best_ci = ci;
		}
	}

	/* If we found one, cool... */
	if (best_ci != NULL) {
		return best_ci;
	}

	/* ...if not, well I guess we'll just fall back on the primary. */
	return &cpu_info_primary;
}

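/*
 * Establish a handler on a shared PCI interrupt.  The first handler on
 * an IRQ also claims the SCB vector and enables the interrupt in the
 * hardware; CPU routing is programmed when the back-end supports it.
 */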
void *
alpha_pci_generic_intr_establish(pci_chipset_tag_t const pc,
    pci_intr_handle_t const ih, int const level,
    int (*func)(void *), void *arg)
{
	const u_int irq = alpha_pci_intr_handle_get_irq(&ih);
	const u_int flags = alpha_pci_intr_handle_get_flags(&ih);
	void *cookie;

	KASSERT(irq < pc->pc_nirq);

	cookie = alpha_shared_intr_alloc_intrhand(pc->pc_shared_intrs,
	    irq, IST_LEVEL, level, flags, func, arg, pc->pc_intr_desc);

	if (cookie == NULL)
		return NULL;

	mutex_enter(&cpu_lock);

	struct cpu_info *target_ci =
	    alpha_pci_generic_intr_select_cpu(pc, irq, flags);
	struct cpu_info *current_ci =
	    alpha_shared_intr_get_cpu(pc->pc_shared_intrs, irq);

	const bool first_handler =
	    ! alpha_shared_intr_isactive(pc->pc_shared_intrs, irq);

	/*
	 * If this is the first handler on this interrupt, or if the
	 * target CPU has changed, then program the route if the
	 * hardware supports it.
	 */
	if (first_handler || target_ci != current_ci) {
		alpha_shared_intr_set_cpu(pc->pc_shared_intrs, irq, target_ci);
		if (pc->pc_intr_set_affinity != NULL) {
			pc->pc_intr_set_affinity(pc, irq, target_ci);
		}
	}

	if (! alpha_shared_intr_link(pc->pc_shared_intrs, cookie,
				     pc->pc_intr_desc)) {
		mutex_exit(&cpu_lock);
		alpha_shared_intr_free_intrhand(cookie);
		return NULL;
	}

	if (first_handler) {
		scb_set(pc->pc_vecbase + SCB_IDXTOVEC(irq),
		    alpha_pci_generic_iointr, pc);
		pc->pc_intr_enable(pc, irq);
	}

	mutex_exit(&cpu_lock);

	return cookie;
}

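/*
 * Remove a handler from a shared PCI interrupt; if it is the last
 * handler remaining on the IRQ, disable the interrupt and free its
 * SCB vector.
 */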
void
alpha_pci_generic_intr_disestablish(pci_chipset_tag_t const pc,
    void * const cookie)
{
	struct alpha_shared_intrhand * const ih = cookie;
	const u_int irq = ih->ih_num;

	mutex_enter(&cpu_lock);

	if (alpha_shared_intr_firstactive(pc->pc_shared_intrs, irq)) {
		pc->pc_intr_disable(pc, irq);
		alpha_shared_intr_set_dfltsharetype(pc->pc_shared_intrs,
		    irq, IST_NONE);
		scb_free(pc->pc_vecbase + SCB_IDXTOVEC(irq));
	}

	alpha_shared_intr_unlink(pc->pc_shared_intrs, cookie, pc->pc_intr_desc);

	mutex_exit(&cpu_lock);

	alpha_shared_intr_free_intrhand(cookie);
}

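/*
 * Common interrupt dispatcher: convert the SCB vector back into an IRQ
 * index, run the chained handlers, and disable the interrupt if too
 * many stray (unclaimed) interrupts are seen.
 */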
void
alpha_pci_generic_iointr(void * const arg, unsigned long const vec)
{
	pci_chipset_tag_t const pc = arg;
	const u_int irq = SCB_VECTOIDX(vec - pc->pc_vecbase);

	if (!alpha_shared_intr_dispatch(pc->pc_shared_intrs, irq)) {
		alpha_shared_intr_stray(pc->pc_shared_intrs, irq,
		    pc->pc_intr_desc);
		if (ALPHA_SHARED_INTR_DISABLE(pc->pc_shared_intrs, irq)) {
			pc->pc_intr_disable(pc, irq);
		}
	} else {
		alpha_shared_intr_reset_strays(pc->pc_shared_intrs, irq);
	}
}

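/*
 * Re-route interrupts away from CPUs that have been shielded from
 * interrupts (SPCF_NOINTR), provided the back-end supports affinity.
 */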
void
alpha_pci_generic_intr_redistribute(pci_chipset_tag_t const pc)
{
	struct cpu_info *current_ci, *new_ci;
	unsigned int irq;

	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(mp_online);

	/* If we can't set affinity, then there's nothing to do. */
	if (pc->pc_eligible_cpus == 0 || pc->pc_intr_set_affinity == NULL) {
		return;
	}

	/*
	 * Look at each IRQ, and allocate a new CPU for each IRQ
	 * that's being serviced by a now-shielded CPU.
	 */
	for (irq = 0; irq < pc->pc_nirq; irq++) {
		current_ci =
		    alpha_shared_intr_get_cpu(pc->pc_shared_intrs, irq);
		if (current_ci == NULL ||
		    (current_ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
			continue;
		}

		new_ci = alpha_pci_generic_intr_select_cpu(pc, irq, 0);
		if (new_ci == current_ci) {
			/* Can't shield this one. */
			continue;
		}

		alpha_shared_intr_set_cpu(pc->pc_shared_intrs, irq, new_ci);
		pc->pc_intr_set_affinity(pc, irq, new_ci);
	}

	/* XXX should now re-balance */
}

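/*
 * A pci_intr_handle_t packs the IRQ number into the low 32 bits of its
 * value and the ALPHA_INTR_* flags into the high 32 bits.
 */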
#define	ALPHA_PCI_INTR_HANDLE_IRQ	__BITS(0,31)
#define	ALPHA_PCI_INTR_HANDLE_FLAGS	__BITS(32,63)

void
alpha_pci_intr_handle_init(pci_intr_handle_t * const ihp, u_int const irq,
    u_int const flags)
{
	ihp->value = __SHIFTIN(irq, ALPHA_PCI_INTR_HANDLE_IRQ) |
	    __SHIFTIN(flags, ALPHA_PCI_INTR_HANDLE_FLAGS);
}

void
alpha_pci_intr_handle_set_irq(pci_intr_handle_t * const ihp, u_int const irq)
{
	ihp->value = (ihp->value & ALPHA_PCI_INTR_HANDLE_FLAGS) |
	    __SHIFTIN(irq, ALPHA_PCI_INTR_HANDLE_IRQ);
}

u_int
alpha_pci_intr_handle_get_irq(const pci_intr_handle_t * const ihp)
{
	return __SHIFTOUT(ihp->value, ALPHA_PCI_INTR_HANDLE_IRQ);
}

void
alpha_pci_intr_handle_set_flags(pci_intr_handle_t * const ihp,
    u_int const flags)
{
	ihp->value = (ihp->value & ALPHA_PCI_INTR_HANDLE_IRQ) |
	    __SHIFTIN(flags, ALPHA_PCI_INTR_HANDLE_FLAGS);
}

u_int
alpha_pci_intr_handle_get_flags(const pci_intr_handle_t * const ihp)
{
	return __SHIFTOUT(ihp->value, ALPHA_PCI_INTR_HANDLE_FLAGS);
}

/*
 * MI PCI back-end entry points.
 */

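/*
 * These dispatch through the per-chipset function pointers in the
 * pci_chipset_tag_t, supplying defaults (e.g. the standard Type 1 tag
 * encoding) where a chipset hook is optional.
 */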
void
pci_attach_hook(device_t const parent, device_t const self,
    struct pcibus_attach_args * const pba)
{
	pci_chipset_tag_t const pc = pba->pba_pc;

	if (pc->pc_attach_hook != NULL) {
		pc->pc_attach_hook(parent, self, pba);
	}
}

int
pci_bus_maxdevs(pci_chipset_tag_t const pc, int const busno)
{
	if (pc->pc_bus_maxdevs == NULL) {
		return 32;
	}

	return pc->pc_bus_maxdevs(pc->pc_conf_v, busno);
}

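/*
 * The default tag encoding follows the standard PCI Type 1
 * configuration address layout: bus in bits 16-23, device in bits
 * 11-15, function in bits 8-10.
 */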
pcitag_t
pci_make_tag(pci_chipset_tag_t const pc, int const bus, int const dev,
    int const func)
{
	if (__predict_true(pc->pc_make_tag == NULL)) {
		/* Just use the standard Type 1 address format. */
		return __SHIFTIN(bus, PCI_CONF_TYPE1_BUS) |
		       __SHIFTIN(dev, PCI_CONF_TYPE1_DEVICE) |
		       __SHIFTIN(func, PCI_CONF_TYPE1_FUNCTION);
	}

	return pc->pc_make_tag(pc->pc_conf_v, bus, dev, func);
}

void
pci_decompose_tag(pci_chipset_tag_t const pc, pcitag_t const tag,
    int * const busp, int * const devp, int * const funcp)
{
	if (__predict_true(pc->pc_decompose_tag == NULL)) {
		if (busp != NULL)
			*busp = __SHIFTOUT(tag, PCI_CONF_TYPE1_BUS);
		if (devp != NULL)
			*devp = __SHIFTOUT(tag, PCI_CONF_TYPE1_DEVICE);
		if (funcp != NULL)
			*funcp = __SHIFTOUT(tag, PCI_CONF_TYPE1_FUNCTION);
		return;
	}

	pc->pc_decompose_tag(pc->pc_conf_v, tag, busp, devp, funcp);
}

pcireg_t
pci_conf_read(pci_chipset_tag_t const pc, pcitag_t const tag, int const reg)
{
	KASSERT(pc->pc_conf_read != NULL);
	return pc->pc_conf_read(pc->pc_conf_v, tag, reg);
}

void
pci_conf_write(pci_chipset_tag_t const pc, pcitag_t const tag, int const reg,
    pcireg_t const val)
{
	KASSERT(pc->pc_conf_write != NULL);
	pc->pc_conf_write(pc->pc_conf_v, tag, reg, val);
}

int
pci_intr_map(const struct pci_attach_args * const pa,
    pci_intr_handle_t * const ihp)
{
	pci_chipset_tag_t const pc = pa->pa_pc;

	KASSERT(pc->pc_intr_map != NULL);
	return pc->pc_intr_map(pa, ihp);
}

const char *
pci_intr_string(pci_chipset_tag_t const pc, pci_intr_handle_t const ih,
    char * const buf, size_t const len)
{
	KASSERT(pc->pc_intr_string != NULL);
	return pc->pc_intr_string(pc, ih, buf, len);
}

const struct evcnt *
pci_intr_evcnt(pci_chipset_tag_t const pc, pci_intr_handle_t const ih)
{
	KASSERT(pc->pc_intr_evcnt != NULL);
	return pc->pc_intr_evcnt(pc, ih);
}

void *
pci_intr_establish(pci_chipset_tag_t const pc, pci_intr_handle_t const ih,
    int const ipl, int (*func)(void *), void *arg)
{
	KASSERT(pc->pc_intr_establish != NULL);
	return pc->pc_intr_establish(pc, ih, ipl, func, arg);
}

void
pci_intr_disestablish(pci_chipset_tag_t const pc, void * const cookie)
{
	KASSERT(pc->pc_intr_disestablish != NULL);
	pc->pc_intr_disestablish(pc, cookie);
}

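/*
 * The only handle attribute currently supported is PCI_INTR_MPSAFE,
 * which toggles ALPHA_INTR_MPSAFE in the handle's flag bits.
 */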
int
pci_intr_setattr(pci_chipset_tag_t const pc __unused,
    pci_intr_handle_t * const ihp, int const attr, uint64_t const data)
{
	u_int flags = alpha_pci_intr_handle_get_flags(ihp);

	switch (attr) {
	case PCI_INTR_MPSAFE:
		if (data)
			flags |= ALPHA_INTR_MPSAFE;
		else
			flags &= ~ALPHA_INTR_MPSAFE;
		break;

	default:
		return ENODEV;
	}

	alpha_pci_intr_handle_set_flags(ihp, flags);
	return 0;
}
    629