/*	$NetBSD: xen_intr.c,v 1.12 2018/12/25 09:00:26 cherry Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.12 2018/12/25 09:00:26 cherry Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

#include <sys/cpu.h>

#include <xen/evtchn.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/intr.h>

#include "acpica.h"
#include "ioapic.h"
#include "lapic.h"
#include "pci.h"

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#endif

#if NIOAPIC > 0 || NACPICA > 0
#include <machine/i82093var.h>
#endif

#if NLAPIC > 0
#include <machine/i82489var.h>
#endif

#if NPCI > 0
#include <dev/pci/ppbreg.h>
#endif

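/*
 * Xen PV replacements for the low-level x86 interrupt/PSL primitives.
 * They are weak-aliased to the usual x86_*() names at the bottom of
 * this file.
 */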
void xen_disable_intr(void);
void xen_enable_intr(void);
u_long xen_read_psl(void);
void xen_write_psl(u_long);

/*
 * Add a mask to cpl, and return the old value of cpl.
 */
int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	olevel = ci->ci_ilevel;
	if (nlevel > olevel)
		ci->ci_ilevel = nlevel;
	__insn_barrier();
	return (olevel);
}
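
/*
 * Usage sketch (illustrative only): callers normally pair splraise()
 * with a later spllower() of the returned value, typically through the
 * spl*()/splx() style macros provided by the MD interrupt headers:
 *
 *	int s = splraise(IPL_VM);
 *	... critical section ...
 *	spllower(s);
 */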

/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	uint32_t xmask;
	u_long psl;

	if (ci->ci_ilevel <= nlevel)
		return;

	__insn_barrier();

	xmask = XUNMASK(ci, nlevel);
	psl = xen_read_psl();
	xen_disable_intr();
	if (ci->ci_xpending & xmask) {
		KASSERT(psl == 0);
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		xen_write_psl(psl);
	}
}

void
xen_disable_intr(void)
{
	__cli();
}

void
xen_enable_intr(void)
{
	__sti();
}

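/*
 * On Xen PV the "PSL" is not EFLAGS.IF but the per-VCPU event channel
 * upcall mask: a non-zero value means event (interrupt) delivery is
 * masked for this VCPU.
 */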
u_long
xen_read_psl(void)
{

	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
}

void
xen_write_psl(u_long psl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_vcpu->evtchn_upcall_mask = psl;
	xen_rmb();
	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
		hypervisor_force_callback();
	}
}

void *
xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
    int type, int level, int (*handler)(void *), void *arg,
    bool known_mpsafe)
{

	return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
	    handler, arg, known_mpsafe, "XEN");
}

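/*
 * Establish a handler either on a Xen event channel (PIC_XEN, where
 * "pin" is the event channel port), or on a physical GSI that gets
 * routed through a pirq-bound event channel (the NPCI/NISA path
 * below).
 */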
void *
xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
    int type, int level, int (*handler)(void *), void *arg,
    bool known_mpsafe, const char *xname)
{
	const char *intrstr;
	char intrstr_buf[INTRIDBUF];

	if (pic->pic_type == PIC_XEN) {
		struct intrhand *rih;

		/*
		 * event_set_handler interprets `level != IPL_VM' to
		 * mean MP-safe, so we require the caller to match that
		 * for the moment.
		 */
		KASSERT(known_mpsafe == (level != IPL_VM));

		intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
		    sizeof(intrstr_buf));

		event_set_handler(pin, handler, arg, level, intrstr, xname);

		rih = kmem_zalloc(sizeof(*rih), cold ? KM_NOSLEEP : KM_SLEEP);
		if (rih == NULL) {
			printf("%s: can't allocate handler info\n", __func__);
			return NULL;
		}

		/*
		 * XXX:
		 * This is just a copy for API conformance.
		 * The real ih is lost in the innards of
		 * event_set_handler(), where the details of
		 * biglock_wrapper etc. are taken care of.
		 * All of that goes away when we nuke event_set_handler()
		 * et al. and unify with x86/intr.c.
		 */
		rih->ih_pin = pin; /* port */
		rih->ih_fun = rih->ih_realfun = handler;
		rih->ih_arg = rih->ih_realarg = arg;
		rih->pic_type = pic->pic_type;
		return rih;
	}	/* Else we assume pintr */

#if NPCI > 0 || NISA > 0
	struct pintrhand *pih;
	int gsi;
	int vector, evtchn;

	KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
	    "bad legacy IRQ value: %d", legacy_irq);
	KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
	    "non-legacy IRQ on i8259");

	gsi = xen_pic_to_gsi(pic, pin);

	intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
	    sizeof(intrstr_buf));

	vector = xen_vec_alloc(gsi);

	if (irq2port[gsi] == 0) {
		extern struct cpu_info phycpu_info_primary; /* XXX */
		struct cpu_info *ci = &phycpu_info_primary;

		pic->pic_addroute(pic, ci, pin, vector, type);

		evtchn = bind_pirq_to_evtch(gsi);
		KASSERT(evtchn > 0);
		KASSERT(evtchn < NR_EVENT_CHANNELS);
		irq2port[gsi] = evtchn + 1;
		xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
	} else {
		/*
		 * Shared interrupt - we can't rebind.
		 * The port is shared instead.
		 */
		evtchn = irq2port[gsi] - 1;
	}

	pih = pirq_establish(gsi, evtchn, handler, arg, level,
	    intrstr, xname);
	pih->pic_type = pic->pic_type;
	return pih;
#endif /* NPCI > 0 || NISA > 0 */

	/* FALLTHROUGH */
	return NULL;
}

/*
 * Deregister an interrupt handler.
 */
void
xen_intr_disestablish(struct intrhand *ih)
{

	if (ih->pic_type == PIC_XEN) {
		event_remove_handler(ih->ih_pin, ih->ih_realfun,
		    ih->ih_realarg);
		kmem_free(ih, sizeof(*ih));
		return;
	}
#if defined(DOM0OPS)
	/*
	 * Cache state up front, to prevent a use-after-free of ih
	 * below.
	 */

	struct pintrhand *pih = (struct pintrhand *)ih;

	int pirq = pih->pirq;
	int port = pih->evtch;
	KASSERT(irq2port[pirq] != 0);

	pirq_disestablish(pih);

	if (evtsource[port] == NULL) {
		/*
		 * Last handler was removed by
		 * event_remove_handler().
		 *
		 * We can safely unbind the pirq now.
		 */

		port = unbind_pirq_from_evtch(pirq);
		KASSERT(port == pih->evtch);
		irq2port[pirq] = 0;
	}
#endif
	return;
}

/* MI interface for kern_cpu.c */
void xen_cpu_intr_redistribute(void);

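/*
 * Currently a no-op on Xen; only the caller's locking preconditions
 * are asserted.
 */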
void
xen_cpu_intr_redistribute(void)
{
	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(mp_online);

	return;
}

/* MD - called by x86/cpu.c */
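/*
 * Red-zone helpers: under DIAGNOSTIC they pass their argument through,
 * otherwise they evaluate to a compile-time false/0 so that the
 * guard-page code in cpu_intr_init() below compiles away.
 */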
#if defined(INTRSTACKSIZE)
static inline bool
redzone_const_or_false(bool x)
{
#ifdef DIAGNOSTIC
	return x;
#else
	return false;
#endif /* !DIAGNOSTIC */
}

static inline int
redzone_const_or_zero(int x)
{
	return redzone_const_or_false(true) ? x : 0;
}
#endif

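/*
 * Per-CPU interrupt state setup.  ci_xunmask[i] ends up with a bit set
 * for every IPL strictly above i (e.g. ci_xunmask[0] == 0xfffffffe), so
 * XUNMASK(ci, level) selects the pending sources that spllower() must
 * process when dropping to "level".  The interrupt stack is also carved
 * out here, with unmapped guard pages at both ends when the red zone is
 * enabled.
 */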
void
cpu_intr_init(struct cpu_info *ci)
{
	int i; /* XXX: duplicate */

	ci->ci_xunmask[0] = 0xfffffffe;
	for (i = 1; i < NIPL; i++)
		ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

	ci->ci_idepth = -1;
}

/*
 * Everything from here down is duplicated from x86/intr.c.
 * When intr.c and xen_intr.c are unified, these will need to be
 * merged.
 */

u_int xen_cpu_intr_count(struct cpu_info *ci);

u_int
xen_cpu_intr_count(struct cpu_info *ci)
{

	KASSERT(ci->ci_nintrhand >= 0);

	return ci->ci_nintrhand;
}

static const char *
xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
{
	KASSERT(pic->pic_type == PIC_XEN);

	KASSERT(port >= 0);
	KASSERT(port < NR_EVENT_CHANNELS);

	snprintf(buf, len, "%s channel %d", pic->pic_name, port);

	return buf;
}

static const char *
legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
{
	int legacy_irq;

	KASSERT(pic->pic_type == PIC_I8259);
#if NLAPIC > 0
	KASSERT(APIC_IRQ_ISLEGACY(ih));

	legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
#else
	legacy_irq = ih;
#endif
	KASSERT(legacy_irq >= 0 && legacy_irq < 16);

	snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);

	return buf;
}

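/*
 * Decode an intr_handle_t (APIC/legacy encoding) into a human-readable
 * string such as "ioapic0 pin 9" or "irq 5".
 */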
const char *
intr_string(intr_handle_t ih, char *buf, size_t len)
{
#if NIOAPIC > 0
	struct ioapic_softc *pic;
#endif

	if (ih == 0)
		panic("%s: bogus handle 0x%" PRIx64, __func__, ih);

#if NIOAPIC > 0
	if (ih & APIC_INT_VIA_APIC) {
		pic = ioapic_find(APIC_IRQ_APIC(ih));
		if (pic != NULL) {
			snprintf(buf, len, "%s pin %d",
			    device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
		} else {
			snprintf(buf, len,
			    "apic %d int %d (irq %d)",
			    APIC_IRQ_APIC(ih),
			    APIC_IRQ_PIN(ih),
			    APIC_IRQ_LEGACY_IRQ(ih));
		}
	} else
		snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));

#elif NLAPIC > 0
	snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
#else
	snprintf(buf, len, "irq %d", (int) ih);
#endif
	return buf;
}

/*
 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
 * by MI code and intrctl(8).
 */
const char *
intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
{
	int ih = 0;

#if NPCI > 0
#if defined(__HAVE_PCI_MSI_MSIX)
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
			| __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
			| APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	if (pic->pic_type == PIC_XEN) {
		ih = pin;	/* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is PCI, "legacy_irq" is always -1.  The least
	 * significant 8 bits of "ih" are only used in intr_string() to
	 * show the irq number.
	 * If the device is "legacy" (such as a floppy), it should not
	 * use intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	ih |= pin;
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}

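/*
 * Wire the standard MI/x86 interrupt entry points to the Xen
 * implementations above.
 */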
__weak_alias(x86_disable_intr, xen_disable_intr);
__weak_alias(x86_enable_intr, xen_enable_intr);
__weak_alias(x86_read_psl, xen_read_psl);
__weak_alias(x86_write_psl, xen_write_psl);

__weak_alias(intr_establish, xen_intr_establish);
__weak_alias(intr_establish_xname, xen_intr_establish_xname);
__weak_alias(intr_disestablish, xen_intr_disestablish);
__weak_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__weak_alias(cpu_intr_count, xen_cpu_intr_count);