xen_intr.c revision 1.24 1 /* $NetBSD: xen_intr.c,v 1.24 2020/04/25 15:26:17 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.24 2020/04/25 15:26:17 bouyer Exp $");
34
35 #include "opt_multiprocessor.h"
36
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/kmem.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42
43 #include <xen/intr.h>
44 #include <xen/evtchn.h>
45 #include <xen/xenfunc.h>
46
47 #include <uvm/uvm.h>
48
49 #include <machine/cpu.h>
50 #include <machine/intr.h>
51
52 #include "acpica.h"
53 #include "ioapic.h"
54 #include "lapic.h"
55 #include "pci.h"
56
57 #if NACPICA > 0
58 #include <dev/acpi/acpivar.h>
59 #endif
60
61 #if NIOAPIC > 0 || NACPICA > 0
62 #include <machine/i82093var.h>
63 #endif
64
65 #if NLAPIC > 0
66 #include <machine/i82489var.h>
67 #endif
68
69 #if NPCI > 0
70 #include <dev/pci/ppbreg.h>
71 #ifdef __HAVE_PCI_MSI_MSIX
72 #include <x86/pci/msipic.h>
73 #include <x86/pci/pci_msi_machdep.h>
74 #endif
75 #endif
76
#if defined(MULTIPROCESSOR)
/* Human-readable names for the Xen IPI sub-events, indexed by IPI number. */
static const char *xen_ipi_names[XEN_NIPIS] = XEN_IPI_NAMES;
#endif
80
81 #if !defined(XENPVHVM)
/*
 * PV replacement for CLI: interrupt delivery on Xen is gated by the
 * per-vCPU event-channel upcall mask, not the hardware IF flag.
 */
void
x86_disable_intr(void)
{
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
	x86_lfence();	/* keep the mask store ordered with later accesses */
}
88
/*
 * PV replacement for STI: clear the per-vCPU upcall mask.  If an
 * upcall became pending while we were masked, force the hypervisor
 * callback so the event is not lost.
 */
void
x86_enable_intr(void)
{
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
	__insn_barrier();
	_vci->evtchn_upcall_mask = 0;
	x86_lfence(); /* unmask then check (avoid races) */
	if (__predict_false(_vci->evtchn_upcall_pending))
		hypervisor_force_callback();
}
99
100 #endif /* !XENPVHVM */
101
102 u_long
103 xen_read_psl(void)
104 {
105
106 return (curcpu()->ci_vcpu->evtchn_upcall_mask);
107 }
108
/*
 * Set the pseudo-PSL.  A non-zero value masks event delivery; when
 * unmasking (psl == 0), force the hypervisor callback if an upcall
 * became pending while masked, so no event is dropped.
 */
void
xen_write_psl(u_long psl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_vcpu->evtchn_upcall_mask = psl;
	xen_rmb();	/* order the mask write before the pending check */
	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
		hypervisor_force_callback();
	}
}
120
121 void *
122 xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
123 int type, int level, int (*handler)(void *), void *arg,
124 bool known_mpsafe)
125 {
126
127 return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
128 handler, arg, known_mpsafe, "XEN");
129 }
130
131 void *
132 xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
133 int type, int level, int (*handler)(void *), void *arg,
134 bool known_mpsafe, const char *xname)
135 {
136 const char *intrstr;
137 char intrstr_buf[INTRIDBUF];
138
139 if (pic->pic_type == PIC_XEN) {
140 struct intrhand *rih;
141
142 intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
143 sizeof(intrstr_buf));
144
145 rih = event_set_handler(pin, handler, arg, level,
146 intrstr, xname, known_mpsafe, true);
147
148 if (rih == NULL) {
149 printf("%s: can't establish interrupt\n", __func__);
150 return NULL;
151 }
152
153 return rih;
154 } /* Else we assume pintr */
155
156 #if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */
157 struct pintrhand *pih;
158 int gsi;
159 int vector, evtchn;
160
161 KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
162 "bad legacy IRQ value: %d", legacy_irq);
163 KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
164 "non-legacy IRQon i8259 ");
165
166 gsi = xen_pic_to_gsi(pic, pin);
167
168 intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
169 sizeof(intrstr_buf));
170
171 vector = xen_vec_alloc(gsi);
172
173 if (irq2port[gsi] == 0) {
174 extern struct cpu_info phycpu_info_primary; /* XXX */
175 struct cpu_info *ci = &phycpu_info_primary;
176
177 pic->pic_addroute(pic, ci, pin, vector, type);
178
179 evtchn = bind_pirq_to_evtch(gsi);
180 KASSERT(evtchn > 0);
181 KASSERT(evtchn < NR_EVENT_CHANNELS);
182 irq2port[gsi] = evtchn + 1;
183 xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
184 } else {
185 /*
186 * Shared interrupt - we can't rebind.
187 * The port is shared instead.
188 */
189 evtchn = irq2port[gsi] - 1;
190 }
191
192 pih = pirq_establish(gsi, evtchn, handler, arg, level,
193 intrstr, xname, known_mpsafe);
194 pih->pic = pic;
195 return pih;
196 #endif /* NPCI > 0 || NISA > 0 */
197
198 /* FALLTHROUGH */
199 return NULL;
200 }
201
202 /*
203 * Mask an interrupt source.
204 */
/*
 * Mask an interrupt source.
 *
 * XXX Not yet implemented for Xen; callers must not rely on it.
 */
void
xen_intr_mask(struct intrhand *ih)
{
	/* Use __func__ for consistency with the other diagnostics here. */
	panic("%s: not yet implemented.", __func__);
}
211
212 /*
213 * Unmask an interrupt source.
214 */
/*
 * Unmask an interrupt source.
 *
 * XXX Not yet implemented for Xen; callers must not rely on it.
 */
void
xen_intr_unmask(struct intrhand *ih)
{
	/* Use __func__ for consistency with the other diagnostics here. */
	panic("%s: not yet implemented.", __func__);
}
221
222 /*
223 * Deregister an interrupt handler.
224 */
225 void
226 xen_intr_disestablish(struct intrhand *ih)
227 {
228
229 if (ih->ih_pic->pic_type == PIC_XEN) {
230 event_remove_handler(ih->ih_pin, ih->ih_realfun,
231 ih->ih_realarg);
232 /* event_remove_handler frees ih */
233 return;
234 }
235 #if defined(DOM0OPS)
236 /*
237 * Cache state, to prevent a use after free situation with
238 * ih.
239 */
240
241 struct pintrhand *pih = (struct pintrhand *)ih;
242
243 int pirq = pih->pirq;
244 int port = pih->evtch;
245 KASSERT(irq2port[pirq] != 0);
246
247 pirq_disestablish(pih);
248
249 if (evtsource[port] == NULL) {
250 /*
251 * Last handler was removed by
252 * event_remove_handler().
253 *
254 * We can safely unbind the pirq now.
255 */
256
257 port = unbind_pirq_from_evtch(pirq);
258 KASSERT(port == pih->evtch);
259 irq2port[pirq] = 0;
260 }
261 #endif
262 return;
263 }
264
265 /* MI interface for kern_cpu.c */
266 void xen_cpu_intr_redistribute(void);
267
268 void
269 xen_cpu_intr_redistribute(void)
270 {
271 KASSERT(mutex_owned(&cpu_lock));
272 KASSERT(mp_online);
273
274 return;
275 }
276
277 /* MD - called by x86/cpu.c */
278 #if defined(INTRSTACKSIZE)
/*
 * Pass the argument through on DIAGNOSTIC kernels; otherwise collapse
 * to a compile-time false so red-zone code is optimized away entirely.
 */
static inline bool
redzone_const_or_false(bool x)
{
#ifndef DIAGNOSTIC
	x = false;
#endif /* !DIAGNOSTIC */
	return x;
}
288
289 static inline int
290 redzone_const_or_zero(int x)
291 {
292 return redzone_const_or_false(true) ? x : 0;
293 }
294 #endif
295
void xen_cpu_intr_init(struct cpu_info *);

/*
 * Per-CPU interrupt initialization: set up kernel preemption state,
 * compute the interrupt masks, carve out the interrupt stack
 * (optionally guarded by unmapped red-zone pages), and attach IPI
 * event counters on MP kernels.
 */
void
xen_cpu_intr_init(struct cpu_info *ci)
{
#if defined(__HAVE_PREEMPTION)
	x86_init_preempt(ci);
#endif
	x86_intr_calculatemasks(ci);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

#ifdef MULTIPROCESSOR
	/* One evcnt(9) counter per Xen IPI type, named after this CPU. */
	for (int i = 0; i < XEN_NIPIS; i++)
		evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(ci->ci_dev), xen_ipi_names[i]);
#endif

	/* -1 == not currently running at any interrupt depth. */
	ci->ci_idepth = -1;
}
338
339 /*
340 * Everything below from here is duplicated from x86/intr.c
341 * When intr.c and xen_intr.c are unified, these will need to be
342 * merged.
343 */
344
345 u_int xen_cpu_intr_count(struct cpu_info *ci);
346
347 u_int
348 xen_cpu_intr_count(struct cpu_info *ci)
349 {
350
351 KASSERT(ci->ci_nintrhand >= 0);
352
353 return ci->ci_nintrhand;
354 }
355
356 static const char *
357 xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
358 {
359 KASSERT(pic->pic_type == PIC_XEN);
360
361 KASSERT(port >= 0);
362 KASSERT(port < NR_EVENT_CHANNELS);
363
364 snprintf(buf, len, "%s chan %d", pic->pic_name, port);
365
366 return buf;
367 }
368
369 static const char *
370 legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
371 {
372 int legacy_irq;
373
374 KASSERT(pic->pic_type == PIC_I8259);
375 #if NLAPIC > 0
376 KASSERT(APIC_IRQ_ISLEGACY(ih));
377
378 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
379 #else
380 legacy_irq = ih;
381 #endif
382 KASSERT(legacy_irq >= 0 && legacy_irq < 16);
383
384 snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
385
386 return buf;
387 }
388
const char * xintr_string(intr_handle_t ih, char *buf, size_t len);

/*
 * Format a human-readable description of encoded interrupt handle
 * "ih" into buf.  Depending on kernel config this names the ioapic
 * and pin, or falls back to the plain legacy IRQ number.  Handle 0
 * is invalid and panics.
 */
const char *
xintr_string(intr_handle_t ih, char *buf, size_t len)
{
#if NIOAPIC > 0
	struct ioapic_softc *pic;
#endif

	if (ih == 0)
		panic("%s: bogus handle 0x%" PRIx64, __func__, ih);

#if NIOAPIC > 0
	if (ih & APIC_INT_VIA_APIC) {
		/* Routed via an I/O APIC: name it by the apic device and pin. */
		pic = ioapic_find(APIC_IRQ_APIC(ih));
		if (pic != NULL) {
			snprintf(buf, len, "%s pin %d",
			    device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
		} else {
			/* APIC not found; dump the raw encoded fields. */
			snprintf(buf, len,
			    "apic %d int %d (irq %d)",
			    APIC_IRQ_APIC(ih),
			    APIC_IRQ_PIN(ih),
			    APIC_IRQ_LEGACY_IRQ(ih));
		}
	} else
		snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));

#elif NLAPIC > 0
	snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
#else
	snprintf(buf, len, "irq %d", (int) ih);
#endif
	return buf;

}
425
426 /*
427 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
428 * by MI code and intrctl(8).
429 */
/*
 * Create an interrupt id such as "ioapic0 pin 9".  This interrupt id
 * is used by MI code and intrctl(8).  Dispatches on the PIC type:
 * MSI/MSI-X, Xen event channel, legacy i8259, or I/O APIC.
 */
const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic,
	int pin, char *buf, size_t len);

const char *
xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
{
	int ih = 0;

#if NPCI > 0 && defined(XENPV)
#if defined(__HAVE_PCI_MSI_MSIX)
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		/* Pack device id + vector into a 64-bit MSI handle. */
		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
		    | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
		    | APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	if (pic->pic_type == PIC_XEN) {
		ih = pin; /* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is pci, "legacy_irq" is always -1.  The least
	 * significant 8 bits of "ih" are only used in intr_string() to
	 * show the irq number.
	 * If the device is "legacy" (such as floppy), it should not use
	 * intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	/* I/O APIC: encode the apic id and pin into the handle. */
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	ih |= pin;
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}
487
488 static struct intrsource xen_dummy_intrsource;
489
490 struct intrsource *
491 xen_intr_allocate_io_intrsource(const char *intrid)
492 {
493 /* Nothing to do, required by MSI code */
494 return &xen_dummy_intrsource;
495 }
496
/*
 * MSI-code hook; the matching "free" for xen_intr_allocate_io_intrsource().
 * Nothing was allocated, so nothing is released.
 */
void
xen_intr_free_io_intrsource(const char *intrid)
{
	/* Nothing to do, required by MSI code */
}
502
#if defined(XENPV)
/*
 * On pure PV kernels the Xen implementations above ARE the machine
 * interrupt interface: alias the generic x86 entry points to them so
 * MI/MD callers link against the Xen versions directly.
 */
__strong_alias(x86_read_psl, xen_read_psl);
__strong_alias(x86_write_psl, xen_write_psl);

__strong_alias(intr_string, xintr_string);
__strong_alias(intr_create_intrid, xen_intr_create_intrid);
__strong_alias(intr_establish, xen_intr_establish);
__strong_alias(intr_establish_xname, xen_intr_establish_xname);
__strong_alias(intr_mask, xen_intr_mask);
__strong_alias(intr_unmask, xen_intr_unmask);
__strong_alias(intr_disestablish, xen_intr_disestablish);
__strong_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__strong_alias(cpu_intr_count, xen_cpu_intr_count);
__strong_alias(cpu_intr_init, xen_cpu_intr_init);
__strong_alias(intr_allocate_io_intrsource, xen_intr_allocate_io_intrsource);
__strong_alias(intr_free_io_intrsource, xen_intr_free_io_intrsource);
#endif /* XENPV */
520