xen_intr.c revision 1.21.2.5 1 /* $NetBSD: xen_intr.c,v 1.21.2.5 2020/04/16 08:46:35 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.21.2.5 2020/04/16 08:46:35 bouyer Exp $");
34
35 #include "opt_multiprocessor.h"
36
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/kmem.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42
43 #include <xen/intr.h>
44 #include <xen/evtchn.h>
45 #include <xen/xenfunc.h>
46
47 #include <uvm/uvm.h>
48
49 #include <machine/cpu.h>
50 #include <machine/intr.h>
51
52 #include "acpica.h"
53 #include "ioapic.h"
54 #include "lapic.h"
55 #include "pci.h"
56
57 #if NACPICA > 0
58 #include <dev/acpi/acpivar.h>
59 #endif
60
61 #if NIOAPIC > 0 || NACPICA > 0
62 #include <machine/i82093var.h>
63 #endif
64
65 #if NLAPIC > 0
66 #include <machine/i82489var.h>
67 #endif
68
69 #if NPCI > 0
70 #include <dev/pci/ppbreg.h>
71 #endif
72
73 #if defined(MULTIPROCESSOR)
74 static const char *xen_ipi_names[XEN_NIPIS] = XEN_IPI_NAMES;
75 #endif
76
77 #if !defined(XENPVHVM)
/*
 * Block event (interrupt) delivery to the current vCPU by setting the
 * Xen per-vCPU upcall mask.  The fence orders the store before any
 * subsequent accesses that rely on interrupts being off.
 */
void
x86_disable_intr(void)
{
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
	x86_lfence();
}
84
/*
 * Re-enable event delivery: clear the upcall mask, then check whether
 * events became pending while we were masked and, if so, force a
 * hypervisor callback so they are processed rather than lost.
 */
void
x86_enable_intr(void)
{
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
	__insn_barrier();
	_vci->evtchn_upcall_mask = 0;
	x86_lfence(); /* unmask then check (avoid races) */
	if (__predict_false(_vci->evtchn_upcall_pending))
		hypervisor_force_callback();
}
95
96 #endif /* !XENPVHVM */
97
98 u_long
99 xen_read_psl(void)
100 {
101
102 return (curcpu()->ci_vcpu->evtchn_upcall_mask);
103 }
104
/*
 * Restore a previously read "PSL" (the per-vCPU upcall mask).  When
 * unmasking (psl == 0) with an upcall already pending, force a
 * hypervisor callback so the pending events get processed.
 */
void
xen_write_psl(u_long psl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_vcpu->evtchn_upcall_mask = psl;
	xen_rmb();	/* write the mask before reading the pending flag */
	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
		hypervisor_force_callback();
	}
}
116
117 void *
118 xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
119 int type, int level, int (*handler)(void *), void *arg,
120 bool known_mpsafe)
121 {
122
123 return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
124 handler, arg, known_mpsafe, "XEN");
125 }
126
127 void *
128 xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
129 int type, int level, int (*handler)(void *), void *arg,
130 bool known_mpsafe, const char *xname)
131 {
132 const char *intrstr;
133 char intrstr_buf[INTRIDBUF];
134
135 if (pic->pic_type == PIC_XEN) {
136 struct intrhand *rih;
137
138 intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
139 sizeof(intrstr_buf));
140
141 event_set_handler(pin, handler, arg, level, intrstr, xname,
142 known_mpsafe);
143
144 rih = kmem_zalloc(sizeof(*rih), cold ? KM_NOSLEEP : KM_SLEEP);
145 if (rih == NULL) {
146 printf("%s: can't allocate handler info\n", __func__);
147 return NULL;
148 }
149
150 /*
151 * XXX:
152 * This is just a copy for API conformance.
153 * The real ih is lost in the innards of
154 * event_set_handler(); where the details of
155 * biglock_wrapper etc are taken care of.
156 * All that goes away when we nuke event_set_handler()
157 * et. al. and unify with x86/intr.c
158 */
159 rih->ih_pin = pin; /* port */
160 rih->ih_fun = rih->ih_realfun = handler;
161 rih->ih_arg = rih->ih_realarg = arg;
162 rih->pic_type = pic->pic_type;
163 return rih;
164 } /* Else we assume pintr */
165
166 #if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */
167 struct pintrhand *pih;
168 int gsi;
169 int vector, evtchn;
170
171 KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
172 "bad legacy IRQ value: %d", legacy_irq);
173 KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
174 "non-legacy IRQon i8259 ");
175
176 gsi = xen_pic_to_gsi(pic, pin);
177
178 intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
179 sizeof(intrstr_buf));
180
181 vector = xen_vec_alloc(gsi);
182
183 if (irq2port[gsi] == 0) {
184 extern struct cpu_info phycpu_info_primary; /* XXX */
185 struct cpu_info *ci = &phycpu_info_primary;
186
187 pic->pic_addroute(pic, ci, pin, vector, type);
188
189 evtchn = bind_pirq_to_evtch(gsi);
190 KASSERT(evtchn > 0);
191 KASSERT(evtchn < NR_EVENT_CHANNELS);
192 irq2port[gsi] = evtchn + 1;
193 xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
194 } else {
195 /*
196 * Shared interrupt - we can't rebind.
197 * The port is shared instead.
198 */
199 evtchn = irq2port[gsi] - 1;
200 }
201
202 pih = pirq_establish(gsi, evtchn, handler, arg, level,
203 intrstr, xname, known_mpsafe);
204 pih->pic_type = pic->pic_type;
205 return pih;
206 #endif /* NPCI > 0 || NISA > 0 */
207
208 /* FALLTHROUGH */
209 return NULL;
210 }
211
212 /*
213 * Mask an interrupt source.
214 */
void
xen_intr_mask(struct intrhand *ih)
{
	/* XXX: unimplemented placeholder — panics if ever reached. */
	panic("xen_intr_mask: not yet implemented.");
}
221
222 /*
223 * Unmask an interrupt source.
224 */
void
xen_intr_unmask(struct intrhand *ih)
{
	/* XXX: unimplemented placeholder — panics if ever reached. */
	panic("xen_intr_unmask: not yet implemented.");
}
231
232 /*
233 * Deregister an interrupt handler.
234 */
235 void
236 xen_intr_disestablish(struct intrhand *ih)
237 {
238
239 if (ih->pic_type == PIC_XEN) {
240 event_remove_handler(ih->ih_pin, ih->ih_realfun,
241 ih->ih_realarg);
242 kmem_free(ih, sizeof(*ih));
243 return;
244 }
245 #if defined(DOM0OPS)
246 /*
247 * Cache state, to prevent a use after free situation with
248 * ih.
249 */
250
251 struct pintrhand *pih = (struct pintrhand *)ih;
252
253 int pirq = pih->pirq;
254 int port = pih->evtch;
255 KASSERT(irq2port[pirq] != 0);
256
257 pirq_disestablish(pih);
258
259 if (evtsource[port] == NULL) {
260 /*
261 * Last handler was removed by
262 * event_remove_handler().
263 *
264 * We can safely unbind the pirq now.
265 */
266
267 port = unbind_pirq_from_evtch(pirq);
268 KASSERT(port == pih->evtch);
269 irq2port[pirq] = 0;
270 }
271 #endif
272 return;
273 }
274
275 /* MI interface for kern_cpu.c */
276 void xen_cpu_intr_redistribute(void);
277
278 void
279 xen_cpu_intr_redistribute(void)
280 {
281 KASSERT(mutex_owned(&cpu_lock));
282 KASSERT(mp_online);
283
284 return;
285 }
286
287 /* MD - called by x86/cpu.c */
288 #if defined(INTRSTACKSIZE)
/*
 * Red-zone helpers for the interrupt stack.  In DIAGNOSTIC kernels
 * these pass their argument through, enabling the guard pages below;
 * otherwise they fold to compile-time constants so the red-zone code
 * is optimized away entirely.
 */
static inline bool
redzone_const_or_false(bool x)
{
#ifdef DIAGNOSTIC
	return x;
#else
	return false;
#endif /* !DIAGNOSTIC */
}

static inline int
redzone_const_or_zero(int x)
{
	if (redzone_const_or_false(true))
		return x;
	return 0;
}
304 #endif
305
306 void xen_cpu_intr_init(struct cpu_info *);
/*
 * Per-CPU interrupt initialization (MD, called by x86/cpu.c): set up
 * preemption state, compute interrupt masks, carve out an interrupt
 * stack (optionally bracketed by unmapped guard pages), and attach
 * per-IPI event counters on MULTIPROCESSOR kernels.
 */
void
xen_cpu_intr_init(struct cpu_info *ci)
{
#if defined(__HAVE_PREEMPTION)
	x86_init_preempt(ci);
#endif
	x86_intr_calculatemasks(ci);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

#ifdef MULTIPROCESSOR
	/* One event counter per Xen IPI type, named after this CPU. */
	for (int i = 0; i < XEN_NIPIS; i++)
		evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(ci->ci_dev), xen_ipi_names[i]);
#endif

	/* Start outside of any interrupt (depth -1 == not in interrupt). */
	ci->ci_idepth = -1;
}
348
349 /*
350 * Everything below from here is duplicated from x86/intr.c
351 * When intr.c and xen_intr.c are unified, these will need to be
352 * merged.
353 */
354
355 u_int xen_cpu_intr_count(struct cpu_info *ci);
356
357 u_int
358 xen_cpu_intr_count(struct cpu_info *ci)
359 {
360
361 KASSERT(ci->ci_nintrhand >= 0);
362
363 return ci->ci_nintrhand;
364 }
365
366 static const char *
367 xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
368 {
369 KASSERT(pic->pic_type == PIC_XEN);
370
371 KASSERT(port >= 0);
372 KASSERT(port < NR_EVENT_CHANNELS);
373
374 snprintf(buf, len, "%s channel %d", pic->pic_name, port);
375
376 return buf;
377 }
378
379 static const char *
380 legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
381 {
382 int legacy_irq;
383
384 KASSERT(pic->pic_type == PIC_I8259);
385 #if NLAPIC > 0
386 KASSERT(APIC_IRQ_ISLEGACY(ih));
387
388 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
389 #else
390 legacy_irq = ih;
391 #endif
392 KASSERT(legacy_irq >= 0 && legacy_irq < 16);
393
394 snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
395
396 return buf;
397 }
398
399 const char * xintr_string(intr_handle_t ih, char *buf, size_t len);
400
401 const char *
402 xintr_string(intr_handle_t ih, char *buf, size_t len)
403 {
404 #if NIOAPIC > 0
405 struct ioapic_softc *pic;
406 #endif
407
408 if (ih == 0)
409 panic("%s: bogus handle 0x%" PRIx64, __func__, ih);
410
411 #if NIOAPIC > 0
412 if (ih & APIC_INT_VIA_APIC) {
413 pic = ioapic_find(APIC_IRQ_APIC(ih));
414 if (pic != NULL) {
415 snprintf(buf, len, "%s pin %d",
416 device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
417 } else {
418 snprintf(buf, len,
419 "apic %d int %d (irq %d)",
420 APIC_IRQ_APIC(ih),
421 APIC_IRQ_PIN(ih),
422 APIC_IRQ_LEGACY_IRQ(ih));
423 }
424 } else
425 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
426
427 #elif NLAPIC > 0
428 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
429 #else
430 snprintf(buf, len, "irq %d", (int) ih);
431 #endif
432 return buf;
433
434 }
435
436 /*
437 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
438 * by MI code and intrctl(8).
439 */
440 const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic,
441 int pin, char *buf, size_t len);
442
const char *
xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
{
	int ih = 0;

#if NPCI > 0 && defined(XENPV)
#if defined(__HAVE_PCI_MSI_MSIX)
	/* MSI/MSI-X: encode device id + vector into the handle. */
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
		    | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
		    | APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	if (pic->pic_type == PIC_XEN) {
		ih = pin; /* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is pci, "legacy_irq" is always -1.  The least
	 * 8 bits of "ih" are only used in intr_string() to show the
	 * irq number.
	 * If the device is "legacy" (such as floppy), it should not use
	 * intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	/* ioapic: pack apic id and pin into the handle. */
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	ih |= pin;	/* low bits carry the raw pin for intr_string() */
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}
497
#if !defined(XENPVHVM)
/*
 * On non-PVHVM (pure PV) kernels, install the Xen implementations
 * above as the generic x86 interrupt entry points via strong aliases.
 */
__strong_alias(x86_read_psl, xen_read_psl);
__strong_alias(x86_write_psl, xen_write_psl);

__strong_alias(intr_string, xintr_string);
__strong_alias(intr_create_intrid, xen_intr_create_intrid);
__strong_alias(intr_establish, xen_intr_establish);
__strong_alias(intr_establish_xname, xen_intr_establish_xname);
__strong_alias(intr_mask, xen_intr_mask);
__strong_alias(intr_unmask, xen_intr_unmask);
__strong_alias(intr_disestablish, xen_intr_disestablish);
__strong_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__strong_alias(cpu_intr_count, xen_cpu_intr_count);
__strong_alias(cpu_intr_init, xen_cpu_intr_init);
#endif /* !XENPVHVM */
513