/* $NetBSD: evtchn.c,v 1.101 2026/01/12 21:42:52 bouyer Exp $ */

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * Copyright (c) 2004, K A Fraser.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.101 2026/01/12 21:42:52 bouyer Exp $");

#include "opt_xen.h"
#include "isa.h"
#include "pci.h"

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/reboot.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>

#include <xen/intr.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenfunc.h>

/* maximum number of (v)CPUs supported */
#ifdef XENPV
#define NBSD_XEN_MAX_VCPUS XEN_LEGACY_MAX_VCPUS
#else
#include <xen/include/public/hvm/hvm_info_table.h>
#define NBSD_XEN_MAX_VCPUS HVM_MAX_VCPUS
#endif

#define NR_PIRQS	NR_EVENT_CHANNELS

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static kmutex_t evtchn_lock;

/* event handlers */
struct evtsource *evtsource[NR_EVENT_CHANNELS];

/* Reference counts for bindings to event channels XXX: redo for SMP */
static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];

/* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */
static evtchn_port_t vcpu_ipi_to_evtch[NBSD_XEN_MAX_VCPUS];

/* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */
static int virq_timer_to_evtch[NBSD_XEN_MAX_VCPUS];

/* event-channel <-> VIRQ mapping. */
static int virq_to_evtch[NR_VIRQS];


#if defined(XENPV) && (NPCI > 0 || NISA > 0)
/* event-channel <-> PIRQ mapping */
static int pirq_to_evtch[NR_PIRQS];
/* PIRQ needing notify */
static int evtch_to_pirq_eoi[NR_EVENT_CHANNELS];
int pirq_interrupt(void *);
#endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */

static void xen_evtchn_mask(struct pic *, int);
static void xen_evtchn_unmask(struct pic *, int);
static void xen_evtchn_addroute(struct pic *, struct cpu_info *, int, int, int);
static void xen_evtchn_delroute(struct pic *, struct cpu_info *, int, int, int);
static bool xen_evtchn_trymask(struct pic *, int);
static void xen_intr_get_devname(const char *, char *, size_t);
static void xen_intr_get_assigned(const char *, kcpuset_t *);
static uint64_t xen_intr_get_count(const char *, u_int);

struct pic xen_pic = {
	.pic_name = "xenev0",
	.pic_type = PIC_XEN,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED,
	.pic_hwmask = xen_evtchn_mask,
	.pic_hwunmask = xen_evtchn_unmask,
	.pic_addroute = xen_evtchn_addroute,
	.pic_delroute = xen_evtchn_delroute,
	.pic_trymask = xen_evtchn_trymask,
	.pic_level_stubs = xenev_stubs,
	.pic_edge_stubs = xenev_stubs,
	.pic_intr_get_devname = xen_intr_get_devname,
	.pic_intr_get_assigned = xen_intr_get_assigned,
	.pic_intr_get_count = xen_intr_get_count,
};

/*
 * We try to stick to the traditional x86 PIC semantics wrt Xen
 * events.
 *
 * PIC pins exist in a global namespace which may be hierarchical, and
 * are mapped to a cpu bus concept called 'IRQ' numbers, which are
 * also global, but linear. Thus a (PIC, pin) tuple will always map to
 * an IRQ number. These tuples can alias to the same IRQ number, thus
 * causing IRQ "sharing". IRQ numbers can be bound to specific CPUs,
 * and to specific callback vector indices on the CPU called idt_vec,
 * which are aliases to handlers meant to run on destination
 * CPUs. This binding can also happen at interrupt time and be resolved
 * 'round-robin' between all CPUs, depending on the lapic setup. In
 * this case, all CPUs need to have identical idt_vec->handler
 * mappings.
 *
 * The job of pic_addroute() is to setup the 'wiring' between the
 * source pin, and the destination CPU handler, ideally on a specific
 * CPU in MP systems (or 'round-robin').
 *
 * On Xen, a global namespace of 'events' exists, which are initially
 * bound to nothing. This is similar to the relationship between
 * real-world IRQ numbers wrt PIC pins, since before routing,
 * IRQ numbers by themselves have no causal connection setup with the
 * real world. (Except for the hardwired cases on the PC Architecture,
 * which we ignore for the purpose of this description). However the
 * really important routing is from pin to idt_vec. On PIC_XEN, all
 * three (pic, irq, idt_vec) belong to the same namespace and are
 * identical. Further, the mapping between idt_vec and the actual
 * callback handler is setup via calls to the evtchn.h api - this
 * last bit is analogous to x86/idt.c:idt_vec_set() on real h/w.
 *
 * For now we handle two cases:
 * - IPC style events - eg: timer, PV devices, etc.
 * - dom0 physical irq bound events.
 *
 * In the case of IPC style events, we currently externalise the
 * event binding by using evtchn.h functions. From the POV of
 * PIC_XEN, 'pin', 'irq' and 'idt_vec' are all identical to the
 * port number of the event.
 *
 * In the case of dom0 physical irq bound events, we currently
 * externalise the event binding by exporting evtchn.h functions. From
 * the POV of PIC_LAPIC/PIC_IOAPIC, the 'pin' is the hardware pin, the
 * 'irq' is the x86 global irq number - the port number is extracted
 * out of a global array (this is currently kludgy and breaks API
 * abstraction) and the binding happens during pic_addroute() of the
 * ioapic.
 *
 * Later when we integrate more tightly with x86/intr.c, we will be
 * able to conform better to the (PIC_LAPIC/PIC_IOAPIC)->PIC_XEN
 * cascading model.
 */
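
/*
 * Illustrative sketch only (not compiled): how an "IPC style" event is
 * typically wired to a handler with the evtchn.h API described above.
 * The driver name "xfoo", its softc and xfoo_intr() are hypothetical;
 * the port would normally be obtained from the hypervisor or xenbus.
 */
#if 0
static int
xfoo_intr(void *arg)
{
	struct xfoo_softc *sc = arg;

	/* service the device; return nonzero if the event was ours */
	return 1;
}

static void
xfoo_wire_event(struct xfoo_softc *sc, evtchn_port_t port)
{
	/* on PIC_XEN, 'pin', 'irq' and 'idt_vec' are all the port number */
	(void)event_set_handler(port, xfoo_intr, sc, IPL_BIO,
	    NULL, "xfoo0", false, NULL);
	/* the handler is installed; let the channel deliver events */
	hypervisor_unmask_event(port);
}
#endif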

int debug_port = -1;

/* #define IRQ_DEBUG 4 */

/* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */
#ifdef MULTIPROCESSOR

/*
 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
 */

int
xen_intr_biglock_wrapper(void *vp)
{
	struct intrhand *ih = vp;
	int ret;

	KERNEL_LOCK(1, NULL);

	ret = (*ih->ih_realfun)(ih->ih_realarg);

	KERNEL_UNLOCK_ONE(NULL);

	return ret;
}
#endif /* MULTIPROCESSOR */

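/*
 * events_default_setup: reset every mapping table below to 'unbound'
 * and mask all event channels, so that nothing is delivered until the
 * individual bindings are (re-)established.
 */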
void
events_default_setup(void)
{
	int i;

	/* No VCPU -> event mappings. */
	for (i = 0; i < NBSD_XEN_MAX_VCPUS; i++)
		vcpu_ipi_to_evtch[i] = -1;

	/* No VIRQ_TIMER -> event mappings. */
	for (i = 0; i < NBSD_XEN_MAX_VCPUS; i++)
		virq_timer_to_evtch[i] = -1;

	/* No VIRQ -> event mappings. */
	for (i = 0; i < NR_VIRQS; i++)
		virq_to_evtch[i] = -1;

#if defined(XENPV) && (NPCI > 0 || NISA > 0)
	/* No PIRQ -> event mappings. */
	for (i = 0; i < NR_PIRQS; i++)
		pirq_to_evtch[i] = -1;
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtch_to_pirq_eoi[i] = -1;
#endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtsource[i] = NULL;
		evtch_bindcount[i] = 0;
		hypervisor_mask_event(i);
	}

}

void
events_init(void)
{
	mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE);

	(void)events_resume();
}

bool
events_resume(void)
{
	debug_port = bind_virq_to_evtch(VIRQ_DEBUG);

	KASSERT(debug_port != -1);

	aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n",
	    debug_port);
	/*
	 * Don't call event_set_handler(), we'll use a shortcut. Just set
	 * evtsource[] to a non-NULL value so that evtchn_do_event will
	 * be called.
	 */
	evtsource[debug_port] = (void *)-1;
	xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port);
	hypervisor_unmask_event(debug_port);
	x86_enable_intr();	/* at long last... */

	return true;
}

bool
events_suspend(void)
{
	int evtch;

	x86_disable_intr();

	/* VIRQ_DEBUG is the last interrupt to remove */
	evtch = unbind_virq_from_evtch(VIRQ_DEBUG);

	KASSERT(evtch != -1);

	hypervisor_mask_event(evtch);
	/* Remove the non-NULL value set in events_resume() */
	evtsource[evtch] = NULL;
	aprint_verbose("VIRQ_DEBUG interrupt disabled, "
	    "event channel %d removed\n", evtch);

	return true;
}

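/*
 * evtchn_do_event: run the handlers registered for one event channel.
 * If the current IPL already blocks the channel's handlers, the event
 * is recorded via hypervisor_set_ipending() and the channel is left
 * masked, to be replayed when the IPL drops; otherwise the IPL is
 * raised and the handlers are called in decreasing IPL order.
 */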
unsigned int
evtchn_do_event(int evtch, struct intrframe *regs)
{
	struct cpu_info *ci;
	int ilevel;
	struct intrhand *ih;
	int (*ih_fun)(void *, void *);
	uint64_t iplmask;

	KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
	KASSERTMSG(evtch < NR_EVENT_CHANNELS,
	    "evtch number %d > NR_EVENT_CHANNELS", evtch);

#ifdef IRQ_DEBUG
	if (evtch == IRQ_DEBUG)
		printf("evtchn_do_event: evtch %d\n", evtch);
#endif
	ci = curcpu();

	/*
	 * Shortcut for the debug handler, we want it to always run,
	 * regardless of the IPL level.
	 */
	if (__predict_false(evtch == debug_port)) {
		xen_debug_handler(NULL);
		hypervisor_unmask_event(debug_port);
		return 0;
	}

	KASSERTMSG(evtsource[evtch] != NULL, "unknown event %d", evtch);

	if (evtsource[evtch]->ev_cpu != ci)
		return 0;

	ci->ci_data.cpu_nintr++;
	evtsource[evtch]->ev_evcnt.ev_count++;
	ilevel = ci->ci_ilevel;

	if (evtsource[evtch]->ev_maxlevel <= ilevel) {
#ifdef IRQ_DEBUG
		if (evtch == IRQ_DEBUG)
			printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
			    evtch, evtsource[evtch]->ev_maxlevel, ilevel);
#endif
		hypervisor_set_ipending(evtsource[evtch]->ev_imask,
		    evtch >> LONG_SHIFT,
		    evtch & LONG_MASK);
		ih = evtsource[evtch]->ev_handlers;
		while (ih != NULL) {
			ih->ih_pending++;
			ih = ih->ih_evt_next;
		}

		/* leave masked */

		return 0;
	}
	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
	iplmask = evtsource[evtch]->ev_imask;
	KASSERT(ci->ci_ilevel >= IPL_VM);
	KASSERT(cpu_intr_p());
	x86_enable_intr();
	ih = evtsource[evtch]->ev_handlers;
	while (ih != NULL) {
		KASSERT(ih->ih_cpu == ci);
		if (ih->ih_level <= ilevel) {
#ifdef IRQ_DEBUG
			if (evtch == IRQ_DEBUG)
				printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
#endif
			x86_disable_intr();
			hypervisor_set_ipending(iplmask,
			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
			/* leave masked */
			while (ih != NULL) {
				ih->ih_pending++;
				ih = ih->ih_evt_next;
			}
			goto splx;
		}
		iplmask &= ~(1ULL << XEN_IPL2SIR(ih->ih_level));
		ci->ci_ilevel = ih->ih_level;
		ih->ih_pending = 0;
		ih_fun = (void *)ih->ih_fun;
		ih_fun(ih->ih_arg, regs);
		ih = ih->ih_evt_next;
	}
	x86_disable_intr();
	hypervisor_unmask_event(evtch);
#if defined(XENPV) && (NPCI > 0 || NISA > 0)
	hypervisor_ack_pirq_event(evtch);
#endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */

splx:
	ci->ci_ilevel = ilevel;
	return 0;
}

#define PRIuCPUID	"lu" /* XXX: move this somewhere more appropriate */

/* PIC callbacks */
/* pic "pin"s are conceptually mapped to event port numbers */
static void
xen_evtchn_mask(struct pic *pic, int pin)
{
	evtchn_port_t evtchn = pin;

	KASSERT(pic->pic_type == PIC_XEN);
	KASSERT(evtchn < NR_EVENT_CHANNELS);

	hypervisor_mask_event(evtchn);
}

static void
xen_evtchn_unmask(struct pic *pic, int pin)
{
	evtchn_port_t evtchn = pin;

	KASSERT(pic->pic_type == PIC_XEN);
	KASSERT(evtchn < NR_EVENT_CHANNELS);

	hypervisor_unmask_event(evtchn);

}


static void
xen_evtchn_addroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type)
{

	evtchn_port_t evtchn = pin;

	/* Events are simulated as level triggered interrupts */
	KASSERT(type == IST_LEVEL);

	KASSERT(evtchn < NR_EVENT_CHANNELS);
#if notyet
	evtchn_port_t boundport = idt_vec;
#endif

	KASSERT(pic->pic_type == PIC_XEN);

	xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);

}
470 */ 471 evtchn_port_t evtchn = pin; 472 473 /* Events are simulated as level triggered interrupts */ 474 KASSERT(type == IST_LEVEL); 475 476 KASSERT(evtchn < NR_EVENT_CHANNELS); 477 #if notyet 478 evtchn_port_t boundport = idt_vec; 479 #endif 480 481 KASSERT(pic->pic_type == PIC_XEN); 482 483 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtchn); 484 } 485 486 /* 487 * xen_evtchn_trymask(pic, pin) 488 * 489 * If there are interrupts pending on the bus-shared pic, return 490 * false. Otherwise, mask interrupts on the bus-shared pic and 491 * return true. 492 */ 493 static bool 494 xen_evtchn_trymask(struct pic *pic, int pin) 495 { 496 volatile struct shared_info *s = HYPERVISOR_shared_info; 497 unsigned long masked __diagused; 498 499 /* Mask it. */ 500 masked = xen_atomic_test_and_set_bit(&s->evtchn_mask[0], pin); 501 502 /* 503 * Caller is responsible for calling trymask only when the 504 * interrupt pin is not masked, and for serializing calls to 505 * trymask. 506 */ 507 KASSERT(!masked); 508 509 /* 510 * Check whether there were any interrupts pending when we 511 * masked it. If there were, unmask and abort. 512 */ 513 if (xen_atomic_test_bit(&s->evtchn_pending[0], pin)) { 514 xen_atomic_clear_bit(&s->evtchn_mask[0], pin); 515 return false; 516 } 517 518 /* Success: masked, not pending. */ 519 return true; 520 } 521 522 evtchn_port_t 523 bind_vcpu_to_evtch(cpuid_t vcpu) 524 { 525 evtchn_op_t op; 526 evtchn_port_t evtchn; 527 528 mutex_spin_enter(&evtchn_lock); 529 530 evtchn = vcpu_ipi_to_evtch[vcpu]; 531 if (evtchn == -1) { 532 op.cmd = EVTCHNOP_bind_ipi; 533 op.u.bind_ipi.vcpu = (uint32_t) vcpu; 534 if (HYPERVISOR_event_channel_op(&op) != 0) 535 panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu); 536 evtchn = op.u.bind_ipi.port; 537 538 vcpu_ipi_to_evtch[vcpu] = evtchn; 539 } 540 541 evtch_bindcount[evtchn]++; 542 543 mutex_spin_exit(&evtchn_lock); 544 545 return evtchn; 546 } 547 548 int 549 bind_virq_to_evtch(int virq) 550 { 551 evtchn_op_t op; 552 int evtchn; 553 554 mutex_spin_enter(&evtchn_lock); 555 556 /* 557 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER. 558 * Please re-visit this implementation when others are used. 559 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs. 560 * XXX: event->virq/ipi can be unified in a linked-list 561 * implementation. 

int
bind_virq_to_evtch(int virq)
{
	evtchn_op_t op;
	int evtchn;

	mutex_spin_enter(&evtchn_lock);

	/*
	 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER.
	 * Please re-visit this implementation when others are used.
	 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs.
	 * XXX: event->virq/ipi can be unified in a linked-list
	 * implementation.
	 */
	struct cpu_info *ci = curcpu();

	if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) {
		mutex_spin_exit(&evtchn_lock);
		return -1;
	}

	if (virq == VIRQ_TIMER) {
		evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
	} else {
		evtchn = virq_to_evtch[virq];
	}

	/* Allocate a channel if there is none already allocated */
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = ci->ci_vcpuid;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind virtual IRQ %d\n", virq);
		evtchn = op.u.bind_virq.port;
	}

	/* Set event channel */
	if (virq == VIRQ_TIMER) {
		virq_timer_to_evtch[ci->ci_vcpuid] = evtchn;
	} else {
		virq_to_evtch[virq] = evtchn;
	}

	/* Increase ref counter */
	evtch_bindcount[evtchn]++;

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

int
unbind_virq_from_evtch(int virq)
{
	evtchn_op_t op;
	int evtchn;

	struct cpu_info *ci = curcpu();

	if (virq == VIRQ_TIMER) {
		evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
	} else {
		evtchn = virq_to_evtch[virq];
	}

	if (evtchn == -1) {
		return -1;
	}

	mutex_spin_enter(&evtchn_lock);

	evtch_bindcount[evtchn]--;
	if (evtch_bindcount[evtchn] == 0) {
		op.cmd = EVTCHNOP_close;
		op.u.close.port = evtchn;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to unbind virtual IRQ %d\n", virq);

		if (virq == VIRQ_TIMER) {
			virq_timer_to_evtch[ci->ci_vcpuid] = -1;
		} else {
			virq_to_evtch[virq] = -1;
		}
	}

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

#if defined(XENPV) && (NPCI > 0 || NISA > 0)
int
get_pirq_to_evtch(int pirq)
{
	int evtchn;

	if (pirq == -1) /* Match previous behaviour */
		return -1;

	if (pirq >= NR_PIRQS) {
		panic("pirq %d out of bound, increase NR_PIRQS", pirq);
	}
	mutex_spin_enter(&evtchn_lock);

	evtchn = pirq_to_evtch[pirq];

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

int
bind_pirq_to_evtch(int pirq)
{
	evtchn_op_t op;
	int evtchn;

	if (pirq >= NR_PIRQS) {
		panic("pirq %d out of bound, increase NR_PIRQS", pirq);
	}

	mutex_spin_enter(&evtchn_lock);

	evtchn = pirq_to_evtch[pirq];
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_pirq;
		op.u.bind_pirq.pirq = pirq;
		op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind physical IRQ %d\n", pirq);
		evtchn = op.u.bind_pirq.port;

#ifdef IRQ_DEBUG
		printf("pirq %d evtchn %d\n", pirq, evtchn);
#endif
		pirq_to_evtch[pirq] = evtchn;
	}

	evtch_bindcount[evtchn]++;

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

int
unbind_pirq_from_evtch(int pirq)
{
	evtchn_op_t op;
	int evtchn = pirq_to_evtch[pirq];

	mutex_spin_enter(&evtchn_lock);

	evtch_bindcount[evtchn]--;
	if (evtch_bindcount[evtchn] == 0) {
		op.cmd = EVTCHNOP_close;
		op.u.close.port = evtchn;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to unbind physical IRQ %d\n", pirq);

		pirq_to_evtch[pirq] = -1;
	}

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

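/*
 * pirq_establish: install "func" as the handler for a physical IRQ that
 * is already bound to event channel "evtch".  The channel is primed for
 * EOI bookkeeping, unmasked, and acked so it can fire immediately.
 */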
struct pintrhand *
pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level,
    const char *intrname, const char *xname, bool known_mpsafe)
{
	struct pintrhand *ih;

	ih = kmem_zalloc(sizeof(struct pintrhand),
	    cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL) {
		printf("pirq_establish: can't allocate handler info\n");
		return NULL;
	}

	KASSERT(evtch > 0);

	ih->pirq = pirq;
	ih->evtch = evtch;
	ih->func = func;
	ih->arg = arg;

	if (event_set_handler(evtch, pirq_interrupt, ih, level, intrname,
	    xname, known_mpsafe, NULL) == NULL) {
		kmem_free(ih, sizeof(struct pintrhand));
		return NULL;
	}

	hypervisor_prime_pirq_event(pirq, evtch);
	hypervisor_unmask_event(evtch);
	hypervisor_ack_pirq_event(evtch);
	return ih;
}

void
pirq_disestablish(struct pintrhand *ih)
{
	int error = event_remove_handler(ih->evtch, pirq_interrupt, ih);
	if (error) {
		printf("pirq_disestablish(%p): %d\n", ih, error);
		return;
	}
	kmem_free(ih, sizeof(struct pintrhand));
}

int
pirq_interrupt(void *arg)
{
	struct pintrhand *ih = arg;
	int ret;

	ret = ih->func(ih->arg);
#ifdef IRQ_DEBUG
	if (ih->evtch == IRQ_DEBUG)
		printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret);
#endif
	return ret;
}

#endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */


/*
 * Recalculate the interrupt from scratch for an event source.
 */
static void
intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci)
{
	struct intrhand *ih;
	int cpu_receive = 0;

	evts->ev_maxlevel = IPL_NONE;
	evts->ev_imask = 0;
	for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
		KASSERT(ih->ih_cpu == curcpu());
		if (ih->ih_level > evts->ev_maxlevel)
			evts->ev_maxlevel = ih->ih_level;
		evts->ev_imask |= (1 << XEN_IPL2SIR(ih->ih_level));
		if (ih->ih_cpu == ci)
			cpu_receive = 1;
	}
	if (cpu_receive)
		xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch);
	else
		xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch);
}


struct event_set_handler_args {
	struct intrhand *ih;
	struct intrsource *ipls;
	struct evtsource *evts;
	int evtch;
};

/*
 * Called on bound CPU to handle event_set_handler()
 * caller (on initiating CPU) holds cpu_lock on our behalf
 * arg1: struct event_set_handler_args *
 * arg2: NULL
 */

static void
event_set_handler_xcall(void *arg1, void *arg2)
{
	struct event_set_handler_args *esh_args = arg1;
	struct intrhand **ihp, *ih = esh_args->ih;
	struct evtsource *evts = esh_args->evts;

	const u_long psl = x86_read_psl();
	x86_disable_intr();
	/* sort by IPL order, higher first */
	for (ihp = &evts->ev_handlers; *ihp != NULL;
	    ihp = &((*ihp)->ih_evt_next)) {
		if ((*ihp)->ih_level < ih->ih_level)
			break;
	}
	/* insert before *ihp */
	ih->ih_evt_next = *ihp;
	*ihp = ih;
#ifndef XENPV
	evts->ev_isl->is_handlers = evts->ev_handlers;
#endif
	/* register per-cpu handler for spllower() */
	struct cpu_info *ci = ih->ih_cpu;
	int sir = XEN_IPL2SIR(ih->ih_level);
	KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);

	KASSERT(ci == curcpu());
	if (ci->ci_isources[sir] == NULL) {
		KASSERT(esh_args->ipls != NULL);
		ci->ci_isources[sir] = esh_args->ipls;
	}
	struct intrsource *ipls = ci->ci_isources[sir];
	ih->ih_next = ipls->is_handlers;
	ipls->is_handlers = ih;
	x86_intr_calculatemasks(ci);

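	/*
	 * Recompute ev_maxlevel/ev_imask and this CPU's ci_evtmask bit
	 * now that the new handler is on the list.
	 */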
	intr_calculatemasks(evts, esh_args->evtch, ci);
	x86_write_psl(psl);
}

struct intrhand *
event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
    const char *intrname, const char *xname, bool mpsafe, struct cpu_info *ci)
{
	struct event_set_handler_args esh_args;
	char intrstr_buf[INTRIDBUF];
	bool bind = false;

	memset(&esh_args, 0, sizeof(esh_args));

	/*
	 * If ci is not specified, we bind to the current cpu.
	 * If ci has been provided by the caller, we assume the caller
	 * will do the EVTCHNOP_bind_vcpu if needed.
	 */
	if (ci == NULL) {
		ci = curcpu();
		bind = true;
	}


#ifdef IRQ_DEBUG
	printf("event_set_handler IRQ %d handler %p\n", evtch, func);
#endif

	KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
	KASSERTMSG(evtch < NR_EVENT_CHANNELS,
	    "evtch number %d > NR_EVENT_CHANNELS", evtch);
	KASSERT(xname != NULL);

#if 0
	printf("event_set_handler evtch %d handler %p level %d\n", evtch,
	    handler, level);
#endif
	esh_args.ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP);
	if (esh_args.ih == NULL)
		panic("can't allocate fixed interrupt source");


	esh_args.ih->ih_pic = &xen_pic;
	esh_args.ih->ih_level = level;
	esh_args.ih->ih_fun = esh_args.ih->ih_realfun = func;
	esh_args.ih->ih_arg = esh_args.ih->ih_realarg = arg;
	esh_args.ih->ih_evt_next = NULL;
	esh_args.ih->ih_next = NULL;
	esh_args.ih->ih_pending = 0;
	esh_args.ih->ih_cpu = ci;
	esh_args.ih->ih_pin = evtch;
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		esh_args.ih->ih_fun = xen_intr_biglock_wrapper;
		esh_args.ih->ih_arg = esh_args.ih;
	}
#endif /* MULTIPROCESSOR */
	KASSERT(mpsafe || level < IPL_HIGH);

	mutex_enter(&cpu_lock);
	/* allocate IPL source if needed */
	int sir = XEN_IPL2SIR(level);
	if (ci->ci_isources[sir] == NULL) {
		struct intrsource *ipls;
		ipls = kmem_zalloc(sizeof (struct intrsource), KM_NOSLEEP);
		if (ipls == NULL)
			panic("can't allocate fixed interrupt source");
		ipls->is_recurse = xenev_stubs[level - IPL_VM].ist_recurse;
		ipls->is_resume = xenev_stubs[level - IPL_VM].ist_resume;
		ipls->is_pic = &xen_pic;
		esh_args.ipls = ipls;
		/*
		 * note that we can't set ci_isources here, as
		 * the assembly can't handle is_handlers being NULL
		 */
	}
	/* register handler for event channel */
	if (evtsource[evtch] == NULL) {
		struct evtsource *evts;
		evtchn_op_t op;
		if (intrname == NULL)
			intrname = intr_create_intrid(-1, &xen_pic, evtch,
			    intrstr_buf, sizeof(intrstr_buf));
		evts = kmem_zalloc(sizeof (struct evtsource),
		    KM_NOSLEEP);
		if (evts == NULL)
			panic("can't allocate fixed interrupt source");

		evts->ev_cpu = ci;
		strlcpy(evts->ev_intrname, intrname, sizeof(evts->ev_intrname));

		evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(ci->ci_dev), evts->ev_intrname);
		if (bind) {
			op.cmd = EVTCHNOP_bind_vcpu;
			op.u.bind_vcpu.port = evtch;
			op.u.bind_vcpu.vcpu = ci->ci_vcpuid;
			if (HYPERVISOR_event_channel_op(&op) != 0) {
				panic("Failed to bind event %d to VCPU %s %d",
				    evtch, device_xname(ci->ci_dev),
				    ci->ci_vcpuid);
			}
		}
#ifndef XENPV
		evts->ev_isl = intr_allocate_io_intrsource(intrname);
		evts->ev_isl->is_pic = &xen_pic;
#endif
		evtsource[evtch] = evts;
	}
	esh_args.evts = evtsource[evtch];

	// append device name
	if (esh_args.evts->ev_xname[0] != '\0') {
		strlcat(esh_args.evts->ev_xname, ", ",
		    sizeof(esh_args.evts->ev_xname));
	}
	strlcat(esh_args.evts->ev_xname, xname,
	    sizeof(esh_args.evts->ev_xname));

	esh_args.evtch = evtch;

	if (ci == curcpu() || !mp_online) {
		event_set_handler_xcall(&esh_args, NULL);
	} else {
		uint64_t where = xc_unicast(0, event_set_handler_xcall,
		    &esh_args, NULL, ci);
		xc_wait(where);
	}

	mutex_exit(&cpu_lock);
	return esh_args.ih;
}

/*
 * Called on bound CPU to handle event_remove_handler()
 * caller (on initiating CPU) holds cpu_lock on our behalf
 * arg1: evtch
 * arg2: struct intrhand *ih
 */

static void
event_remove_handler_xcall(void *arg1, void *arg2)
{
	struct intrsource *ipls;
	struct evtsource *evts;
	struct intrhand **ihp;
	struct cpu_info *ci;
	struct intrhand *ih = arg2;
	int evtch = (intptr_t)(arg1);

	evts = evtsource[evtch];
	KASSERT(evts != NULL);
	KASSERT(ih != NULL);
	ci = ih->ih_cpu;
	KASSERT(ci == curcpu());

	const u_long psl = x86_read_psl();
	x86_disable_intr();

	for (ihp = &evts->ev_handlers; *ihp != NULL;
	    ihp = &(*ihp)->ih_evt_next) {
		if ((*ihp) == ih)
			break;
	}
	if (*(ihp) == NULL) {
		panic("event_remove_handler_xcall: not in ev_handlers");
	}

	*ihp = ih->ih_evt_next;

	int sir = XEN_IPL2SIR(ih->ih_level);
	KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
	ipls = ci->ci_isources[sir];
	for (ihp = &ipls->is_handlers; *ihp != NULL; ihp = &(*ihp)->ih_next) {
		if (*ihp == ih)
			break;
	}
	if (*ihp == NULL)
		panic("event_remove_handler_xcall: not in is_handlers");
	*ihp = ih->ih_next;
	intr_calculatemasks(evts, evtch, ci);
#ifndef XENPV
	evts->ev_isl->is_handlers = evts->ev_handlers;
#endif
	if (evts->ev_handlers == NULL)
		xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);

	x86_write_psl(psl);
}

int
event_remove_handler(int evtch, int (*func)(void *), void *arg)
{
	struct intrhand *ih;
	struct cpu_info *ci;
	struct evtsource *evts;

	mutex_enter(&cpu_lock);
	evts = evtsource[evtch];
	if (evts == NULL) {
		/* don't leak cpu_lock on the no-such-source error path */
		mutex_exit(&cpu_lock);
		return ENOENT;
	}

	for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
		if (ih->ih_realfun == func && ih->ih_realarg == arg)
			break;
	}
	if (ih == NULL) {
		mutex_exit(&cpu_lock);
		return ENOENT;
	}
	ci = ih->ih_cpu;

	if (ci == curcpu() || !mp_online) {
		event_remove_handler_xcall((void *)(intptr_t)evtch, ih);
	} else {
		uint64_t where = xc_unicast(0, event_remove_handler_xcall,
		    (void *)(intptr_t)evtch, ih, ci);
		xc_wait(where);
	}

	kmem_free(ih, sizeof (struct intrhand));
	if (evts->ev_handlers == NULL) {
#ifndef XENPV
		KASSERT(evts->ev_isl->is_handlers == NULL);
		intr_free_io_intrsource(evts->ev_intrname);
#endif
		evcnt_detach(&evts->ev_evcnt);
		kmem_free(evts, sizeof (struct evtsource));
		evtsource[evtch] = NULL;
	}
	mutex_exit(&cpu_lock);
	return 0;
}

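/*
 * PIRQ EOI handling: some physical IRQs require an explicit end-of-interrupt
 * notification to the hypervisor (XENIRQSTAT_needs_eoi).  The routines below
 * record which event channels need it and issue PHYSDEVOP_eoi once the
 * handlers have run.
 */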
panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)"); 1098 if (irq_status.flags & XENIRQSTAT_needs_eoi) { 1099 evtch_to_pirq_eoi[evtch] = pirq; 1100 #ifdef IRQ_DEBUG 1101 printf("pirq %d needs notify\n", pirq); 1102 #endif 1103 } 1104 } 1105 1106 void 1107 hypervisor_ack_pirq_event(unsigned int evtch) 1108 { 1109 #ifdef IRQ_DEBUG 1110 if (evtch == IRQ_DEBUG) 1111 printf("%s: evtch %d\n", __func__, evtch); 1112 #endif 1113 1114 if (evtch_to_pirq_eoi[evtch] > 0) { 1115 struct physdev_eoi eoi; 1116 eoi.irq = evtch_to_pirq_eoi[evtch]; 1117 #ifdef IRQ_DEBUG 1118 if (evtch == IRQ_DEBUG) 1119 printf("pirq_notify(%d)\n", evtch); 1120 #endif 1121 (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); 1122 } 1123 } 1124 #endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */ 1125 1126 int 1127 xen_debug_handler(void *arg) 1128 { 1129 struct cpu_info *ci = curcpu(); 1130 int i; 1131 int xci_ilevel = ci->ci_ilevel; 1132 int xci_ipending = ci->ci_ipending; 1133 int xci_idepth = ci->ci_idepth; 1134 u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending; 1135 u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask; 1136 u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel; 1137 unsigned long evtchn_mask[sizeof(unsigned long) * 8]; 1138 unsigned long evtchn_pending[sizeof(unsigned long) * 8]; 1139 1140 u_long p; 1141 1142 p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0]; 1143 memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask)); 1144 p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0]; 1145 memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending)); 1146 1147 __insn_barrier(); 1148 printf("debug event\n"); 1149 printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n", 1150 xci_ilevel, xci_ipending, xci_idepth); 1151 printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld" 1152 " evtchn_pending_sel 0x%lx\n", 1153 upcall_pending, upcall_mask, pending_sel); 1154 printf("evtchn_mask"); 1155 for (i = 0 ; i <= LONG_MASK; i++) 1156 printf(" %lx", (u_long)evtchn_mask[i]); 1157 printf("\n"); 1158 printf("evtchn_pending"); 1159 for (i = 0 ; i <= LONG_MASK; i++) 1160 printf(" %lx", (u_long)evtchn_pending[i]); 1161 printf("\n"); 1162 return 0; 1163 } 1164 1165 static struct evtsource * 1166 event_get_handler(const char *intrid) 1167 { 1168 for (int i = 0; i < NR_EVENT_CHANNELS; i++) { 1169 if (evtsource[i] == NULL || i == debug_port) 1170 continue; 1171 1172 struct evtsource *evp = evtsource[i]; 1173 1174 if (strcmp(evp->ev_intrname, intrid) == 0) 1175 return evp; 1176 } 1177 1178 return NULL; 1179 } 1180 1181 static uint64_t 1182 xen_intr_get_count(const char *intrid, u_int cpu_idx) 1183 { 1184 int count = 0; 1185 struct evtsource *evp; 1186 1187 mutex_spin_enter(&evtchn_lock); 1188 1189 evp = event_get_handler(intrid); 1190 if (evp != NULL && cpu_idx == cpu_index(evp->ev_cpu)) 1191 count = evp->ev_evcnt.ev_count; 1192 1193 mutex_spin_exit(&evtchn_lock); 1194 1195 return count; 1196 } 1197 1198 static void 1199 xen_intr_get_assigned(const char *intrid, kcpuset_t *cpuset) 1200 { 1201 struct evtsource *evp; 1202 1203 kcpuset_zero(cpuset); 1204 1205 mutex_spin_enter(&evtchn_lock); 1206 1207 evp = event_get_handler(intrid); 1208 if (evp != NULL) 1209 kcpuset_set(cpuset, cpu_index(evp->ev_cpu)); 1210 1211 mutex_spin_exit(&evtchn_lock); 1212 } 1213 1214 static void 1215 xen_intr_get_devname(const char *intrid, char *buf, size_t len) 1216 { 1217 struct evtsource *evp; 1218 1219 mutex_spin_enter(&evtchn_lock); 1220 1221 evp = event_get_handler(intrid); 1222 strlcpy(buf, evp ? 
static struct evtsource *
event_get_handler(const char *intrid)
{
	for (int i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (evtsource[i] == NULL || i == debug_port)
			continue;

		struct evtsource *evp = evtsource[i];

		if (strcmp(evp->ev_intrname, intrid) == 0)
			return evp;
	}

	return NULL;
}

static uint64_t
xen_intr_get_count(const char *intrid, u_int cpu_idx)
{
	int count = 0;
	struct evtsource *evp;

	mutex_spin_enter(&evtchn_lock);

	evp = event_get_handler(intrid);
	if (evp != NULL && cpu_idx == cpu_index(evp->ev_cpu))
		count = evp->ev_evcnt.ev_count;

	mutex_spin_exit(&evtchn_lock);

	return count;
}

static void
xen_intr_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct evtsource *evp;

	kcpuset_zero(cpuset);

	mutex_spin_enter(&evtchn_lock);

	evp = event_get_handler(intrid);
	if (evp != NULL)
		kcpuset_set(cpuset, cpu_index(evp->ev_cpu));

	mutex_spin_exit(&evtchn_lock);
}

static void
xen_intr_get_devname(const char *intrid, char *buf, size_t len)
{
	struct evtsource *evp;

	mutex_spin_enter(&evtchn_lock);

	evp = event_get_handler(intrid);
	strlcpy(buf, evp ? evp->ev_xname : "unknown", len);

	mutex_spin_exit(&evtchn_lock);
}

#ifdef XENPV
/*
 * MI interface for subr_interrupt.
 */
struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int i, count, off;
	struct evtsource *evp;

	if (kcpuset_iszero(cpuset))
		return 0;

	/*
	 * Count the number of interrupts whose affinity is set to any
	 * cpu of "cpuset".
	 */
	count = 0;
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evp = evtsource[i];

		if (evp == NULL || i == debug_port)
			continue;

		if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu)))
			continue;

		count++;
	}

	ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
	    KM_SLEEP);
	if (ii_handler == NULL)
		return NULL;
	ii_handler->iih_nids = count;
	if (count == 0)
		return ii_handler;

	ids = ii_handler->iih_intrids;
	mutex_spin_enter(&evtchn_lock);
	for (i = 0, off = 0; i < NR_EVENT_CHANNELS && off < count; i++) {
		evp = evtsource[i];

		if (evp == NULL || i == debug_port)
			continue;

		if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu)))
			continue;

		snprintf(ids[off], sizeof(intrid_t), "%s", evp->ev_intrname);
		off++;
	}
	mutex_spin_exit(&evtchn_lock);
	return ii_handler;
}
__strong_alias(interrupt_get_count, xen_intr_get_count);
__strong_alias(interrupt_get_assigned, xen_intr_get_assigned);
__strong_alias(interrupt_get_devname, xen_intr_get_devname);
__strong_alias(x86_intr_get_count, xen_intr_get_count);
__strong_alias(x86_intr_get_assigned, xen_intr_get_assigned);
__strong_alias(x86_intr_get_devname, xen_intr_get_devname);
#endif /* XENPV */