xen_intr.c revision 1.23 1 1.23 jdolecek /* $NetBSD: xen_intr.c,v 1.23 2020/04/21 19:03:51 jdolecek Exp $ */
2 1.2 bouyer
3 1.2 bouyer /*-
4 1.2 bouyer * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 1.2 bouyer * All rights reserved.
6 1.2 bouyer *
7 1.2 bouyer * This code is derived from software contributed to The NetBSD Foundation
8 1.2 bouyer * by Charles M. Hannum, and by Jason R. Thorpe.
9 1.2 bouyer *
10 1.2 bouyer * Redistribution and use in source and binary forms, with or without
11 1.2 bouyer * modification, are permitted provided that the following conditions
12 1.2 bouyer * are met:
13 1.2 bouyer * 1. Redistributions of source code must retain the above copyright
14 1.2 bouyer * notice, this list of conditions and the following disclaimer.
15 1.2 bouyer * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 bouyer * notice, this list of conditions and the following disclaimer in the
17 1.2 bouyer * documentation and/or other materials provided with the distribution.
18 1.2 bouyer *
19 1.2 bouyer * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 bouyer * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 bouyer * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 bouyer * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 bouyer * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 bouyer * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 bouyer * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 bouyer * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 bouyer * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 bouyer * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 bouyer * POSSIBILITY OF SUCH DAMAGE.
30 1.2 bouyer */
31 1.2 bouyer
32 1.2 bouyer #include <sys/cdefs.h>
33 1.23 jdolecek __KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.23 2020/04/21 19:03:51 jdolecek Exp $");
34 1.19 ad
35 1.19 ad #include "opt_multiprocessor.h"
36 1.2 bouyer
37 1.2 bouyer #include <sys/param.h>
38 1.10 cherry #include <sys/kernel.h>
39 1.10 cherry #include <sys/kmem.h>
40 1.11 cherry #include <sys/cpu.h>
41 1.19 ad #include <sys/device.h>
42 1.11 cherry
43 1.10 cherry #include <xen/evtchn.h>
44 1.15 cherry #include <xen/xenfunc.h>
45 1.2 bouyer
46 1.12 cherry #include <uvm/uvm.h>
47 1.12 cherry
48 1.2 bouyer #include <machine/cpu.h>
49 1.2 bouyer #include <machine/intr.h>
50 1.2 bouyer
51 1.11 cherry #include "acpica.h"
52 1.11 cherry #include "ioapic.h"
53 1.11 cherry #include "lapic.h"
54 1.11 cherry #include "pci.h"
55 1.11 cherry
56 1.11 cherry #if NACPICA > 0
57 1.11 cherry #include <dev/acpi/acpivar.h>
58 1.11 cherry #endif
59 1.11 cherry
60 1.11 cherry #if NIOAPIC > 0 || NACPICA > 0
61 1.11 cherry #include <machine/i82093var.h>
62 1.11 cherry #endif
63 1.11 cherry
64 1.11 cherry #if NLAPIC > 0
65 1.11 cherry #include <machine/i82489var.h>
66 1.11 cherry #endif
67 1.11 cherry
68 1.11 cherry #if NPCI > 0
69 1.11 cherry #include <dev/pci/ppbreg.h>
70 1.23 jdolecek #ifdef __HAVE_PCI_MSI_MSIX
71 1.23 jdolecek #include <x86/pci/msipic.h>
72 1.23 jdolecek #include <x86/pci/pci_msi_machdep.h>
73 1.23 jdolecek #endif
74 1.11 cherry #endif
75 1.11 cherry
76 1.19 ad #if defined(MULTIPROCESSOR)
77 1.19 ad static const char *xen_ipi_names[XEN_NIPIS] = XEN_IPI_NAMES;
78 1.19 ad #endif
79 1.19 ad
/*
 * Restore a value to cpl (unmasking interrupts). If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void xen_spllower(int nlevel);

/*
 * xen_spllower(nlevel): lower the current CPU's interrupt priority
 * level to 'nlevel'.  Interrupts that were blocked at the old level
 * but are permitted at the new one are processed via Xspllower().
 */
void
xen_spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	uint32_t xmask;
	u_long psl;

	/* Not actually lowering: nothing can have become deliverable. */
	if (ci->ci_ilevel <= nlevel)
		return;

	/* Compiler barrier: don't let the cpl update move across this. */
	__insn_barrier();

	/* Bits newly unmasked at 'nlevel'. */
	xmask = XUNMASK(ci, nlevel);
	psl = xen_read_psl();
	/* Block events while we inspect ci_xpending (avoids races). */
	x86_disable_intr();
	if (ci->ci_xpending & xmask) {
		KASSERT(psl == 0);
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		/* Restore the saved event mask state. */
		xen_write_psl(psl);
	}
}
110 1.2 bouyer
111 1.16 bouyer
#if !defined(XENPVHVM)
/*
 * PV replacements for cli/sti: interrupt delivery on Xen PV is gated
 * by the per-vCPU evtchn_upcall_mask byte shared with the hypervisor.
 */
void
x86_disable_intr(void)
{
	/* Set the mask, then fence so it is visible before we proceed. */
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
	x86_lfence();
}

void
x86_enable_intr(void)
{
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
	__insn_barrier();
	_vci->evtchn_upcall_mask = 0;
	x86_lfence(); /* unmask then check (avoid races) */
	/* An event may have been posted while masked; deliver it now. */
	if (__predict_false(_vci->evtchn_upcall_pending))
		hypervisor_force_callback();
}

#endif /* !XENPVHVM */
132 1.17 cherry
133 1.2 bouyer u_long
134 1.11 cherry xen_read_psl(void)
135 1.2 bouyer {
136 1.2 bouyer
137 1.4 cegger return (curcpu()->ci_vcpu->evtchn_upcall_mask);
138 1.2 bouyer }
139 1.2 bouyer
/*
 * Set the vCPU event mask to 'psl'.  If this unmasks events (psl == 0)
 * and one was posted while we were masked, force a callback so that it
 * is not lost.
 */
void
xen_write_psl(u_long psl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_vcpu->evtchn_upcall_mask = psl;
	xen_rmb();	/* order the mask write before the pending check */
	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
		hypervisor_force_callback();
	}
}
151 1.10 cherry
152 1.10 cherry void *
153 1.10 cherry xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
154 1.10 cherry int type, int level, int (*handler)(void *), void *arg,
155 1.10 cherry bool known_mpsafe)
156 1.10 cherry {
157 1.10 cherry
158 1.10 cherry return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
159 1.10 cherry handler, arg, known_mpsafe, "XEN");
160 1.10 cherry }
161 1.10 cherry
162 1.10 cherry void *
163 1.10 cherry xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
164 1.10 cherry int type, int level, int (*handler)(void *), void *arg,
165 1.10 cherry bool known_mpsafe, const char *xname)
166 1.10 cherry {
167 1.10 cherry const char *intrstr;
168 1.10 cherry char intrstr_buf[INTRIDBUF];
169 1.10 cherry
170 1.10 cherry if (pic->pic_type == PIC_XEN) {
171 1.10 cherry struct intrhand *rih;
172 1.10 cherry
173 1.10 cherry intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
174 1.10 cherry sizeof(intrstr_buf));
175 1.10 cherry
176 1.20 jdolecek event_set_handler(pin, handler, arg, level, intrstr, xname,
177 1.22 bouyer known_mpsafe, true);
178 1.10 cherry
179 1.10 cherry rih = kmem_zalloc(sizeof(*rih), cold ? KM_NOSLEEP : KM_SLEEP);
180 1.10 cherry if (rih == NULL) {
181 1.10 cherry printf("%s: can't allocate handler info\n", __func__);
182 1.10 cherry return NULL;
183 1.10 cherry }
184 1.10 cherry
185 1.10 cherry /*
186 1.10 cherry * XXX:
187 1.10 cherry * This is just a copy for API conformance.
188 1.10 cherry * The real ih is lost in the innards of
189 1.10 cherry * event_set_handler(); where the details of
190 1.10 cherry * biglock_wrapper etc are taken care of.
191 1.10 cherry * All that goes away when we nuke event_set_handler()
192 1.10 cherry * et. al. and unify with x86/intr.c
193 1.10 cherry */
194 1.10 cherry rih->ih_pin = pin; /* port */
195 1.10 cherry rih->ih_fun = rih->ih_realfun = handler;
196 1.10 cherry rih->ih_arg = rih->ih_realarg = arg;
197 1.10 cherry rih->pic_type = pic->pic_type;
198 1.10 cherry return rih;
199 1.10 cherry } /* Else we assume pintr */
200 1.10 cherry
201 1.14 cherry #if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */
202 1.10 cherry struct pintrhand *pih;
203 1.10 cherry int gsi;
204 1.10 cherry int vector, evtchn;
205 1.10 cherry
206 1.10 cherry KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
207 1.10 cherry "bad legacy IRQ value: %d", legacy_irq);
208 1.10 cherry KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
209 1.10 cherry "non-legacy IRQon i8259 ");
210 1.10 cherry
211 1.10 cherry gsi = xen_pic_to_gsi(pic, pin);
212 1.10 cherry
213 1.10 cherry intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
214 1.10 cherry sizeof(intrstr_buf));
215 1.10 cherry
216 1.10 cherry vector = xen_vec_alloc(gsi);
217 1.10 cherry
218 1.10 cherry if (irq2port[gsi] == 0) {
219 1.10 cherry extern struct cpu_info phycpu_info_primary; /* XXX */
220 1.10 cherry struct cpu_info *ci = &phycpu_info_primary;
221 1.10 cherry
222 1.10 cherry pic->pic_addroute(pic, ci, pin, vector, type);
223 1.10 cherry
224 1.10 cherry evtchn = bind_pirq_to_evtch(gsi);
225 1.10 cherry KASSERT(evtchn > 0);
226 1.10 cherry KASSERT(evtchn < NR_EVENT_CHANNELS);
227 1.10 cherry irq2port[gsi] = evtchn + 1;
228 1.10 cherry xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
229 1.10 cherry } else {
230 1.10 cherry /*
231 1.10 cherry * Shared interrupt - we can't rebind.
232 1.10 cherry * The port is shared instead.
233 1.10 cherry */
234 1.10 cherry evtchn = irq2port[gsi] - 1;
235 1.10 cherry }
236 1.10 cherry
237 1.10 cherry pih = pirq_establish(gsi, evtchn, handler, arg, level,
238 1.21 jdolecek intrstr, xname, known_mpsafe);
239 1.10 cherry pih->pic_type = pic->pic_type;
240 1.10 cherry return pih;
241 1.10 cherry #endif /* NPCI > 0 || NISA > 0 */
242 1.10 cherry
243 1.10 cherry /* FALLTHROUGH */
244 1.10 cherry return NULL;
245 1.10 cherry }
246 1.10 cherry
/*
 * Mask an interrupt source.
 */
void
xen_intr_mask(struct intrhand *ih)
{
	/* XXX: not implemented on Xen yet; any caller reaching this panics. */
	panic("xen_intr_mask: not yet implemented.");
}
256 1.18 thorpej
/*
 * Unmask an interrupt source.
 */
void
xen_intr_unmask(struct intrhand *ih)
{
	/* XXX: not implemented on Xen yet; any caller reaching this panics. */
	panic("xen_intr_unmask: not yet implemented.");
}
266 1.18 thorpej
267 1.18 thorpej /*
268 1.10 cherry * Deregister an interrupt handler.
269 1.10 cherry */
270 1.10 cherry void
271 1.10 cherry xen_intr_disestablish(struct intrhand *ih)
272 1.10 cherry {
273 1.10 cherry
274 1.10 cherry if (ih->pic_type == PIC_XEN) {
275 1.10 cherry event_remove_handler(ih->ih_pin, ih->ih_realfun,
276 1.10 cherry ih->ih_realarg);
277 1.10 cherry kmem_free(ih, sizeof(*ih));
278 1.10 cherry return;
279 1.10 cherry }
280 1.10 cherry #if defined(DOM0OPS)
281 1.10 cherry /*
282 1.10 cherry * Cache state, to prevent a use after free situation with
283 1.10 cherry * ih.
284 1.10 cherry */
285 1.10 cherry
286 1.10 cherry struct pintrhand *pih = (struct pintrhand *)ih;
287 1.10 cherry
288 1.10 cherry int pirq = pih->pirq;
289 1.10 cherry int port = pih->evtch;
290 1.10 cherry KASSERT(irq2port[pirq] != 0);
291 1.10 cherry
292 1.10 cherry pirq_disestablish(pih);
293 1.10 cherry
294 1.10 cherry if (evtsource[port] == NULL) {
295 1.10 cherry /*
296 1.10 cherry * Last handler was removed by
297 1.10 cherry * event_remove_handler().
298 1.10 cherry *
299 1.10 cherry * We can safely unbind the pirq now.
300 1.10 cherry */
301 1.10 cherry
302 1.10 cherry port = unbind_pirq_from_evtch(pirq);
303 1.10 cherry KASSERT(port == pih->evtch);
304 1.10 cherry irq2port[pirq] = 0;
305 1.10 cherry }
306 1.10 cherry #endif
307 1.10 cherry return;
308 1.10 cherry }
309 1.10 cherry
310 1.11 cherry /* MI interface for kern_cpu.c */
311 1.11 cherry void xen_cpu_intr_redistribute(void);
312 1.11 cherry
313 1.11 cherry void
314 1.11 cherry xen_cpu_intr_redistribute(void)
315 1.11 cherry {
316 1.11 cherry KASSERT(mutex_owned(&cpu_lock));
317 1.11 cherry KASSERT(mp_online);
318 1.11 cherry
319 1.11 cherry return;
320 1.11 cherry }
321 1.11 cherry
/* MD - called by x86/cpu.c */
#if defined(INTRSTACKSIZE)
/*
 * Pass 'x' through on DIAGNOSTIC kernels, yield false otherwise, so
 * the red-zone setup below compiles away on non-DIAGNOSTIC builds.
 */
static inline bool
redzone_const_or_false(bool x)
{
#ifdef DIAGNOSTIC
	return x;
#else
	return false;
#endif /* !DIAGNOSTIC */
}

/* Integer companion: 'x' on DIAGNOSTIC kernels, 0 otherwise. */
static inline int
redzone_const_or_zero(int x)
{
	if (redzone_const_or_false(true))
		return x;
	return 0;
}
#endif
340 1.12 cherry
void xen_cpu_intr_init(struct cpu_info *);

/*
 * Per-CPU interrupt initialization: build the IPL unmask table,
 * allocate the interrupt stack (with optional red-zone guard pages),
 * and attach IPI event counters on MULTIPROCESSOR kernels.
 */
void
xen_cpu_intr_init(struct cpu_info *ci)
{
	int i; /* XXX: duplicate */

	/*
	 * ci_xunmask[l] = events allowed at IPL l: each level masks
	 * its own bit plus everything the level below already masked.
	 */
	ci->ci_xunmask[0] = 0xfffffffe;
	for (i = 1; i < NIPL; i++)
		ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		/* Unmap the guard pages at each end of the stack. */
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1. Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

#ifdef MULTIPROCESSOR
	/* One evcnt per IPI type, named for intrctl(8)/vmstat(1). */
	for (i = 0; i < XEN_NIPIS; i++)
		evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(ci->ci_dev), xen_ipi_names[i]);
#endif

	/* -1 == not currently processing an interrupt. */
	ci->ci_idepth = -1;
}
384 1.11 cherry
385 1.11 cherry /*
386 1.11 cherry * Everything below from here is duplicated from x86/intr.c
387 1.11 cherry * When intr.c and xen_intr.c are unified, these will need to be
388 1.11 cherry * merged.
389 1.11 cherry */
390 1.11 cherry
391 1.11 cherry u_int xen_cpu_intr_count(struct cpu_info *ci);
392 1.11 cherry
393 1.11 cherry u_int
394 1.11 cherry xen_cpu_intr_count(struct cpu_info *ci)
395 1.11 cherry {
396 1.11 cherry
397 1.11 cherry KASSERT(ci->ci_nintrhand >= 0);
398 1.11 cherry
399 1.11 cherry return ci->ci_nintrhand;
400 1.11 cherry }
401 1.11 cherry
402 1.11 cherry static const char *
403 1.11 cherry xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
404 1.11 cherry {
405 1.11 cherry KASSERT(pic->pic_type == PIC_XEN);
406 1.11 cherry
407 1.11 cherry KASSERT(port >= 0);
408 1.11 cherry KASSERT(port < NR_EVENT_CHANNELS);
409 1.11 cherry
410 1.11 cherry snprintf(buf, len, "%s channel %d", pic->pic_name, port);
411 1.11 cherry
412 1.11 cherry return buf;
413 1.11 cherry }
414 1.11 cherry
415 1.11 cherry static const char *
416 1.11 cherry legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
417 1.11 cherry {
418 1.11 cherry int legacy_irq;
419 1.11 cherry
420 1.11 cherry KASSERT(pic->pic_type == PIC_I8259);
421 1.11 cherry #if NLAPIC > 0
422 1.11 cherry KASSERT(APIC_IRQ_ISLEGACY(ih));
423 1.11 cherry
424 1.11 cherry legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
425 1.11 cherry #else
426 1.11 cherry legacy_irq = ih;
427 1.11 cherry #endif
428 1.11 cherry KASSERT(legacy_irq >= 0 && legacy_irq < 16);
429 1.11 cherry
430 1.11 cherry snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
431 1.11 cherry
432 1.11 cherry return buf;
433 1.11 cherry }
434 1.11 cherry
const char * xintr_string(intr_handle_t ih, char *buf, size_t len);

/*
 * Decode an interrupt handle into a human-readable string in 'buf'
 * (at most 'len' bytes) and return 'buf'.  The format depends on
 * which interrupt controllers the kernel is configured with.
 */
const char *
xintr_string(intr_handle_t ih, char *buf, size_t len)
{
#if NIOAPIC > 0
	struct ioapic_softc *pic;
#endif

	/* Handle 0 is never valid. */
	if (ih == 0)
		panic("%s: bogus handle 0x%" PRIx64, __func__, ih);

#if NIOAPIC > 0
	if (ih & APIC_INT_VIA_APIC) {
		pic = ioapic_find(APIC_IRQ_APIC(ih));
		if (pic != NULL) {
			snprintf(buf, len, "%s pin %d",
			    device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
		} else {
			/* ioapic not attached: show the raw routing. */
			snprintf(buf, len,
			    "apic %d int %d (irq %d)",
			    APIC_IRQ_APIC(ih),
			    APIC_IRQ_PIN(ih),
			    APIC_IRQ_LEGACY_IRQ(ih));
		}
	} else
		snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));

#elif NLAPIC > 0
	snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
#else
	snprintf(buf, len, "irq %d", (int) ih);
#endif
	return buf;

}
471 1.11 cherry
/*
 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
 * by MI code and intrctl(8).
 */
const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic,
    int pin, char *buf, size_t len);

const char *
xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
{
	int ih = 0;

#if NPCI > 0
#if defined(__HAVE_PCI_MSI_MSIX)
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		/* Pack device id + vector into an MSI handle. */
		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
		    | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
		    | APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	if (pic->pic_type == PIC_XEN) {
		ih = pin; /* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is pci, "legacy_irq" is alway -1. Least 8 bit of "ih"
	 * is only used in intr_string() to show the irq number.
	 * If the device is "legacy"(such as floppy), it should not use
	 * intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	/* Encode apic id + pin into an interrupt handle for intr_string(). */
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	ih |= pin;
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}
533 1.11 cherry
534 1.23 jdolecek static struct intrsource xen_dummy_intrsource;
535 1.23 jdolecek
536 1.23 jdolecek struct intrsource *
537 1.23 jdolecek xen_intr_allocate_io_intrsource(const char *intrid)
538 1.23 jdolecek {
539 1.23 jdolecek /* Nothing to do, required by MSI code */
540 1.23 jdolecek return &xen_dummy_intrsource;
541 1.23 jdolecek }
542 1.23 jdolecek
543 1.23 jdolecek void
544 1.23 jdolecek xen_intr_free_io_intrsource(const char *intrid)
545 1.23 jdolecek {
546 1.23 jdolecek /* Nothing to do, required by MSI code */
547 1.23 jdolecek }
548 1.23 jdolecek
#if !defined(XENPVHVM)
/*
 * On pure PV kernels the MI x86 interrupt entry points resolve
 * directly to the Xen implementations above via strong aliases.
 */
__strong_alias(spllower, xen_spllower);
__strong_alias(x86_read_psl, xen_read_psl);
__strong_alias(x86_write_psl, xen_write_psl);

__strong_alias(intr_string, xintr_string);
__strong_alias(intr_create_intrid, xen_intr_create_intrid);
__strong_alias(intr_establish, xen_intr_establish);
__strong_alias(intr_establish_xname, xen_intr_establish_xname);
__strong_alias(intr_mask, xen_intr_mask);
__strong_alias(intr_unmask, xen_intr_unmask);
__strong_alias(intr_disestablish, xen_intr_disestablish);
__strong_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__strong_alias(cpu_intr_count, xen_cpu_intr_count);
__strong_alias(cpu_intr_init, xen_cpu_intr_init);
__strong_alias(intr_allocate_io_intrsource, xen_intr_allocate_io_intrsource);
__strong_alias(intr_free_io_intrsource, xen_intr_free_io_intrsource);
#endif /* !XENPVHVM */
567