/*	$NetBSD: interrupt.c,v 1.12 2023/10/06 11:45:16 skrll Exp $	*/

/*-
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Keith Bostic, Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*-
 * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
 * Redistribute and modify at will, leaving only this additional copyright
 * notice.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.12 2023/10/06 11:45:16 skrll Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/sched.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/md_var.h>
#include <machine/sapicvar.h>
#include <machine/smp.h>
#include <machine/userret.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif


static void ia64_intr_eoi(void *);
static void ia64_intr_mask(void *);
#if 0
static void ia64_intr_unmask(void *);
#endif
static int ia64_dispatch_intr(void *, u_int);

#ifdef DDB
void db_print_vector(u_int, int);
#endif

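/*
 * Main interrupt dispatcher.  ExtINT (vector 0) is resolved by an
 * INTA cycle, vector 15 is the spurious interrupt, CLOCK_VECTOR
 * drives hardclock() on the primary CPU, and every other vector is
 * handed to ia64_dispatch_intr().  After each vector is serviced,
 * cr.eoi is written and cr.ivr is polled for further pending
 * interrupts until it reads back the spurious vector.
 */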
int
interrupt(uint64_t vector, struct trapframe *tf)
{
	struct cpu_info *ci = curcpu();
	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;
	uint64_t adj, clk, itc;
	int64_t delta;
	uint8_t inta;
	int count, handled = 0;

	ia64_set_fpsr(IA64_FPSR_DEFAULT);

	ci->ci_intrdepth++;
	ci->ci_data.cpu_nintr++;

next:
	/*
	 * Handle ExtINT interrupts by generating an INTA cycle to
	 * read the vector.
	 */
	if (vector == 0) {
		inta = ib->ib_inta;
		printf("ExtINT interrupt: vector=%u\n", (u_int)inta);
		if (inta == 15) {
			__asm __volatile("mov cr.eoi = r0;; srlz.d");
			goto stray;
		}
		vector = (int)inta;
	} else if (vector == 15)
		goto stray;

	if (vector == CLOCK_VECTOR) {	/* clock interrupt */
		itc = ia64_get_itc();

		adj = ci->ci_clockadj;
		clk = ci->ci_clock;
		delta = itc - clk;
		count = 0;
		while (delta >= ia64_clock_reload) {
			/* Only the BSP runs the real clock */
			if (ci->ci_cpuid == 0)
				hardclock((struct clockframe *)tf);
			else
				panic("CLOCK_VECTOR on non-BSP CPU");
			delta -= ia64_clock_reload;
			clk += ia64_clock_reload;
			count++;
			handled = 1;
		}
		ia64_set_itm(ia64_get_itc() + ia64_clock_reload - adj);
		if (count > 0) {
			if (delta > (ia64_clock_reload >> 3)) {
				adj = ia64_clock_reload >> 4;
			} else
				adj = 0;
		} else
			adj = 0;
		ci->ci_clock = clk;
		ci->ci_clockadj = adj;
		ia64_srlz_d();

#ifdef MULTIPROCESSOR
	} else if (vector == ipi_vector[IPI_AST]) {
		asts[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	} else if (vector == ipi_vector[IPI_HIGH_FP]) {
		struct thread *thr = PCPU_GET(fpcurthread);

		if (thr != NULL) {
			mtx_lock_spin(&thr->td_md.md_highfp_mtx);
			save_high_fp(&thr->td_pcb->pcb_high_fp);
			thr->td_pcb->pcb_fpcpu = NULL;
			PCPU_SET(fpcurthread, NULL);
			mtx_unlock_spin(&thr->td_md.md_highfp_mtx);
		}
	} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
		rdvs[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
		enable_intr();
		smp_rendezvous_action();
		disable_intr();
	} else if (vector == ipi_vector[IPI_STOP]) {
		cpumask_t mybit = PCPU_GET(cpumask);

		savectx(PCPU_PTR(pcb));
		atomic_set_int(&stopped_cpus, mybit);
		while ((started_cpus & mybit) == 0)
			cpu_spinwait();
		atomic_clear_int(&started_cpus, mybit);
		atomic_clear_int(&stopped_cpus, mybit);
	} else if (vector == ipi_vector[IPI_PREEMPT]) {
		CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid));
		__asm __volatile("mov cr.eoi = r0;; srlz.d");
		enable_intr();
		sched_preempt(curthread);
		disable_intr();
		goto stray;
#endif
	} else {
		ci->ci_intrdepth++;
		handled = ia64_dispatch_intr(tf, vector);
		ci->ci_intrdepth--;
	}

	__asm __volatile("mov cr.eoi = r0;; srlz.d");
	vector = ia64_get_ivr();
	if (vector != 15)
		goto next;

stray:
	if (TRAPF_USERMODE(tf)) {
		enable_intr();
		userret(curlwp);
		do_ast(tf);
	}
	ci->ci_intrdepth--;
	return handled;
}

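/*
 * Interrupt registration.  Established interrupts live in the
 * vector-indexed ia64_intrs[] table below; each entry records the
 * owning I/O SAPIC, the trigger type and an event counter, and heads
 * a list of handlers (struct ia64_intrhand) which ia64_dispatch_intr()
 * runs in turn.
 */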
/*
 * Hardware irqs have vectors starting at this offset.
 */
#define	IA64_HARDWARE_IRQ_BASE	0x20

struct ia64_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	LIST_ENTRY(ia64_intrhand) ih_q;
	int ih_level;
	int ih_irq;
};

struct ia64_intr {
	u_int irq;
	struct sapic *sapic;
	int type;

	LIST_HEAD(, ia64_intrhand) intr_q;

	char evname[32];
	struct evcnt evcnt;
};

static struct ia64_intr *ia64_intrs[256];


static void
ia64_intr_eoi(void *arg)
{
	u_int vector = (uintptr_t)arg;
	struct ia64_intr *i;

	i = ia64_intrs[vector];
	if (i != NULL)
		sapic_eoi(i->sapic, vector);
}

static void
ia64_intr_mask(void *arg)
{
	u_int vector = (uintptr_t)arg;
	struct ia64_intr *i;

	i = ia64_intrs[vector];
	if (i != NULL) {
		sapic_mask(i->sapic, i->irq);
		sapic_eoi(i->sapic, vector);
	}
}

#if 0
static void
ia64_intr_unmask(void *arg)
{
	u_int vector = (uintptr_t)arg;
	struct ia64_intr *i;

	i = ia64_intrs[vector];
	if (i != NULL)
		sapic_unmask(i->sapic, i->irq);
}
#endif

void *
intr_establish_xname(int irq, int type, int level, int (*func)(void *),
    void *arg, const char *xname)
{
	/* TODO: xname support */
	return intr_establish(irq, type, level, func, arg);
}

void *
intr_establish(int irq, int type, int level, int (*func)(void *), void *arg)
{
	struct ia64_intr *i;
	struct ia64_intrhand *ih;
	struct sapic *sa;
	u_int vector;

	/* Get the I/O SAPIC that corresponds to the IRQ. */
	sa = sapic_lookup(irq);
	if (sa == NULL)
		return NULL;

	switch (type) {
	case IST_EDGE:
	case IST_LEVEL:
		break;

	default:
		return NULL;
	}

	/*
	 * XXX - There's a priority implied by the choice of vector.
	 * We should therefore relate the vector to the interrupt type.
	 */
	vector = irq + IA64_HARDWARE_IRQ_BASE;

	i = ia64_intrs[vector];
	if (i == NULL) {
		i = kmem_alloc(sizeof(struct ia64_intr), KM_SLEEP);
		i->irq = irq;
		i->sapic = sa;
		i->type = type;
		LIST_INIT(&i->intr_q);
		snprintf(i->evname, sizeof(i->evname), "irq %d", irq);
		evcnt_attach_dynamic(&i->evcnt, EVCNT_TYPE_INTR, NULL,
		    "iosapic", i->evname);
		ia64_intrs[vector] = i;

		sapic_config_intr(irq, type);
		sapic_enable(i->sapic, irq, vector);
	} else if (i->type != type)
		return NULL;

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_level = level;
	ih->ih_irq = irq;
	LIST_INSERT_HEAD(&i->intr_q, ih, ih_q);

	return ih;
}

void
intr_disestablish(void *cookie)
{
	struct ia64_intr *i;
	struct ia64_intrhand *ih = cookie;
	u_int vector = ih->ih_irq + IA64_HARDWARE_IRQ_BASE;

	i = ia64_intrs[vector];

	LIST_REMOVE(ih, ih_q);
	if (LIST_FIRST(&i->intr_q) == NULL) {
		ia64_intr_mask((void *)(uintptr_t)vector);

		ia64_intrs[vector] = NULL;
		evcnt_detach(&i->evcnt);
		kmem_free(i, sizeof(*i));
	}

	kmem_free(ih, sizeof(*ih));
}

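/*
 * Illustrative sketch of how a driver might hook and later unhook a
 * level-triggered handler through the interface above.  The IRQ
 * number, IPL, softc and function names are hypothetical.
 */
#if 0
static int
example_intr(void *arg)
{
	struct example_softc *sc = arg;	/* hypothetical softc */

	/* ... service the device ... */
	return 1;			/* claim the interrupt */
}

static void
example_attach_intr(struct example_softc *sc)
{
	sc->sc_ih = intr_establish(9, IST_LEVEL, IPL_BIO, example_intr, sc);
	if (sc->sc_ih == NULL)
		printf("example0: couldn't establish interrupt\n");
}

static void
example_detach_intr(struct example_softc *sc)
{
	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);
}
#endif
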
static int
ia64_dispatch_intr(void *frame, u_int vector)
{
	struct ia64_intr *i;
	struct ia64_intrhand *ih;
	int handled = 0;

	/*
	 * Find the interrupt handler chain for this vector.
	 */
	i = ia64_intrs[vector];
	KASSERT(i != NULL);

	i->evcnt.ev_count++;

	LIST_FOREACH(ih, &i->intr_q, ih_q) {
		if (__predict_false(ih->ih_func == NULL))
			printf("%s: spurious interrupt (irq = %d)\n",
			    __func__, ih->ih_irq);
		else if (__predict_true((*ih->ih_func)(ih->ih_arg)))
			handled = 1;
	}
	ia64_intr_eoi((void *)(uintptr_t)vector);

	return handled;
}

void
ia64_handle_intr(void *tf)
{
	panic("XXX %s not implemented", __func__);
}

#ifdef DDB
void
db_print_vector(u_int vector, int always)
{
	struct ia64_intr *i;

	i = ia64_intrs[vector];
	if (i != NULL) {
		db_printf("vector %u (%p): ", vector, i);
		sapic_print(i->sapic, i->irq);
	} else if (always)
		db_printf("vector %u: unassigned\n", vector);
}

const char *
intr_string(intr_handle_t ih, char *buf, size_t len)
{
	panic("XXX %s not implemented", __func__);
}
#endif