/*	$NetBSD: intvec.S,v 1.23 2022/09/02 23:48:10 thorpej Exp $   */

/*
 * Copyright (c) 1994, 1997 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include "assym.h"
#include <sys/cdefs.h>

#include "opt_ddb.h"
#include "opt_cputype.h"
#include "opt_emulate.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "leds.h"

#define SCBENTRY(name) \
        .text                        ; \
        .align 2                     ; \
        .globl __CONCAT(X,name)      ; \
__CONCAT(X,name):

#define TRAPCALL(namn, typ) \
        SCBENTRY(namn)               ; \
        pushl $0                     ; \
        pushl $typ                   ; \
        jbr Xtrap

#define TRAPARGC(namn, typ) \
        SCBENTRY(namn)               ; \
        pushl $typ                   ; \
        jbr Xtrap

#define FASTINTR(namn, rutin) \
        SCBENTRY(namn)               ; \
        pushr $0x3f                  ; \
        calls $0,_C_LABEL(rutin)     ; \
        popr $0x3f                   ; \
        rei

#define PUSHR   pushr $0x3f
#define POPR    popr $0x3f

#define KSTACK  0
#define ISTACK  1
#define NOVEC   .long 0
#define INTVEC(label,stack) \
        .long __CONCAT(X,label)+stack;
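
/*
 * For reference, the trap stubs below are all built from these macros;
 * TRAPCALL(resadflt, T_RESADFLT), for example, expands roughly to
 *
 *	.text
 *	.align 2
 *	.globl Xresadflt
 * Xresadflt:
 *	pushl $0
 *	pushl $T_RESADFLT
 *	jbr Xtrap
 *
 * i.e. each stub pushes a zero "code" longword and a trap type and joins
 * the common Xtrap path further down.  INTVEC(label,stack) emits the SCB
 * vector itself; adding ISTACK (1) sets bit 0 of the vector, which makes
 * the hardware deliver the event on the interrupt stack rather than the
 * kernel stack.
 */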
        .text

        .globl  _C_LABEL(kernbase), _C_LABEL(rpb), _C_LABEL(kernel_text)
        .set    _C_LABEL(kernel_text),KERNBASE
_C_LABEL(kernbase):
_C_LABEL(rpb):
/*
 * The rpb is in the first page of memory, so that we know where it is
 * (it must be on a 64k page boundary, and this is the easiest place).
 * We use it to store the SCB vectors generated when compiling the
 * kernel, and move the SCB somewhere else later.
 */

        NOVEC;                          # Unused, 0
        INTVEC(mcheck, ISTACK)          # Machine Check., 4
        INTVEC(invkstk, ISTACK)         # Kernel Stack Invalid., 8
        NOVEC;                          # Power Failed., C
        INTVEC(privinflt, KSTACK)       # Privileged/Reserved Instruction., 10
        INTVEC(xfcflt, KSTACK)          # Customer Reserved Instruction, 14
        INTVEC(resopflt, KSTACK)        # Reserved Operand/Boot Vector(?), 18
        INTVEC(resadflt, KSTACK)        # Reserved Address Mode., 1C
        INTVEC(access_v, KSTACK)        # Access Control Violation, 20
        INTVEC(transl_v, KSTACK)        # Translation Invalid, 24
        INTVEC(tracep, KSTACK)          # Trace Pending, 28
        INTVEC(breakp, KSTACK)          # Breakpoint Instruction, 2C
        NOVEC;                          # Compatibility Exception, 30
        INTVEC(arithflt, KSTACK)        # Arithmetic Fault, 34
        NOVEC;                          # Unused, 38
        NOVEC;                          # Unused, 3C
        INTVEC(syscall, KSTACK)         # main syscall trap, chmk, 40
        INTVEC(chmx, KSTACK)            # chme, 44
        INTVEC(chmx, KSTACK)            # chms, 48
        INTVEC(chmx, KSTACK)            # chmu, 4C
        NOVEC;                          # System Backplane Exception/BIerror, 50
        INTVEC(cmrerr, ISTACK)          # Corrected Memory Read, 54
        NOVEC;                          # System Backplane Alert/RXCD, 58
        INTVEC(sbiflt, ISTACK)          # System Backplane Fault, 5C
        NOVEC;                          # Memory Write Timeout, 60
        NOVEC;                          # Unused, 64
        NOVEC;                          # Unused, 68
        NOVEC;                          # Unused, 6C
        NOVEC;                          # Unused, 70
        NOVEC;                          # Unused, 74
        NOVEC;                          # Unused, 78
        NOVEC;                          # Unused, 7C
        NOVEC;                          # Unused, 80
        NOVEC;                          # Unused, 84
        INTVEC(astintr, KSTACK)         # Asynchronous System Trap, AST (IPL 02)
        NOVEC;                          # Unused, 8C
        NOVEC;                          # Unused, 90
        NOVEC;                          # Unused, 94
        NOVEC;                          # Unused, 98
        NOVEC;                          # Unused, 9C
        INTVEC(softclock, KSTACK);      # Software clock interrupt, A0 (IPL 08)
        NOVEC;                          # Unused, A4 (IPL 09)
        NOVEC;                          # Unused, A8 (IPL 10)
        INTVEC(softbio, KSTACK);        # Software bio interrupt, AC (IPL 11)
        INTVEC(softnet, KSTACK);        # Software net interrupt, B0 (IPL 12)
        INTVEC(softserial, KSTACK);     # Software serial interrupt, B4 (IPL 13)
        NOVEC;                          # Unused, B8 (IPL 14)
        INTVEC(ddbtrap, ISTACK)         # Kernel debugger trap, BC (IPL 15)
        INTVEC(hardclock,ISTACK)        # Interval Timer
        NOVEC;                          # Unused, C4
        INTVEC(emulate, KSTACK)         # Subset instruction emulation, C8
        NOVEC;                          # Unused, CC
        NOVEC;                          # Unused, D0
        NOVEC;                          # Unused, D4
        NOVEC;                          # Unused, D8
        NOVEC;                          # Unused, DC
        NOVEC;                          # Unused, E0
        NOVEC;                          # Unused, E4
        NOVEC;                          # Unused, E8
        NOVEC;                          # Unused, EC
        NOVEC;
        NOVEC;
        NOVEC;
        NOVEC;

/* space for adapter vectors */
        .space 0x100

        .align 2
#
# mcheck is the bad-address trap, also taken when referencing
# an invalid address (bus error).
# _memtest (memtest in C) holds the address at which to continue
# execution when returning from an intentional test.
#
SCBENTRY(mcheck)
        tstl    _C_LABEL(cold)          # Are we still in coldstart?
        bneq    L4                      # Yes.

        pushr   $0x7f
        pushab  24(%sp)
        movl    _C_LABEL(dep_call),%r6  # CPU dependent mchk handling
        calls   $1,*MCHK(%r6)
        tstl    %r0                     # If not machine check, try memory error
        beql    1f
        calls   $0,*MEMERR(%r6)
        pushab  2f
        calls   $1,_C_LABEL(panic)
2:      .asciz  "mchk"
1:      popr    $0x7f
        addl2   (%sp)+,%sp

        rei

L4:     addl2   (%sp)+,%sp              # remove info pushed on stack
        pushr   $0x3f                   # save regs for clobbering
        movl    _C_LABEL(dep_call),%r0  # get CPU-specific mchk handler
        tstl    BADADDR(%r0)            # any handler available?
        bneq    4f                      # yep, call it
        popr    $0x3f                   # nope, restore regs
        brb     0f                      # continue
4:      calls   $0,*BADADDR(%r0)        # call machine-specific handler
        popr    $0x3f                   # restore regs
        brb     2f

0:      cmpl    _C_LABEL(vax_cputype),$1        # Is it an 11/780?
        bneq    1f                              # No...

        mtpr    $0, $PR_SBIFS           # Clear SBI fault register
        brb     2f

1:      cmpl    _C_LABEL(vax_cputype),$4        # Is it an 8600?
        bneq    3f

        mtpr    $0, $PR_EHSR            # Clear Error status register
        brb     2f

3:      mtpr    $0xF,$PR_MCESR          # clear the bus error bit
2:      movl    _C_LABEL(memtest),(%sp) # REI to new address
        rei
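
/*
 * A note on the "addl2 (%sp)+,%sp" sequences above: a machine check
 * pushes a longword byte count followed by that many bytes of
 * CPU-specific data in front of the PC/PSL pair, so adding the count
 * to %sp discards the parameters and leaves a plain exception frame
 * for the REI.  During coldstart (cold != 0) the fault is assumed to
 * be an intentional bad-address probe: the machine-specific BADADDR
 * handler (if any) is given a chance, the fault state is cleaned up,
 * and execution resumes at the address saved in memtest.  Otherwise
 * the CPU-specific MCHK/MEMERR handlers from dep_call deal with (or
 * panic on) the event.
 */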
TRAPCALL(invkstk, T_KSPNOTVAL)

SCBENTRY(privinflt)     # Privileged/unimplemented instruction
#ifndef NO_INSN_EMULATE
        jsb     _C_LABEL(unimemu)       # do not return if insn emulated
#endif
        pushl   $0
        pushl   $T_PRIVINFLT
        jbr     Xtrap

TRAPCALL(xfcflt, T_XFCFLT);
TRAPCALL(resopflt, T_RESOPFLT)
TRAPCALL(resadflt, T_RESADFLT)

/*
 * default handler for CHME and CHMS
 */
SCBENTRY(chmx)
        clrl    (%sp)                   # CHM code already on stack
        pushl   $T_RESOPFLT
        jbr     Xtrap

/*
 * Translation fault, used only when simulating the page reference bit.
 * We therefore do a fast revalidation of the page if it is referenced.
 * The trouble here is a hardware bug on KA650 CPUs that requires an
 * extra check when the fault is taken during a PTE reference.
 * Handled in pmap.c.
 */
SCBENTRY(transl_v)      # 24: Translation violation
        pushr   $0x3f
        pushl   28(%sp)
        pushl   28(%sp)
        calls   $2,_C_LABEL(pmap_simulref)
        tstl    %r0
        bneq    1f
        popr    $0x3f
        addl2   $8,%sp
        rei
1:      popr    $0x3f
        brw     Xaccess_v

SCBENTRY(access_v)      # 20: Access cntrl viol fault
        blbs    (%sp), ptelen
        pushl   $T_ACCFLT
        bbc     $1,4(%sp),1f
        bisl2   $T_PTEFETCH,(%sp)
1:      bbc     $2,4(%sp),2f
        bisl2   $T_WRITE,(%sp)
2:      movl    (%sp), 4(%sp)
        addl2   $4, %sp
        jbr     Xtrap

ptelen: movl    $T_PTELEN, (%sp)        # PTE must expand (or send segv)
        jbr     Xtrap;

TRAPCALL(tracep, T_TRCTRAP)
TRAPCALL(breakp, T_BPTFLT)

TRAPARGC(arithflt, T_ARITHFLT)

SCBENTRY(syscall)       # Main system call
#if 1
        cmpl    (%sp), $SYS__lwp_getprivate
        bneq    1f
        mfpr    $PR_SSP, %r0            # get curlwp
        movl    L_PRIVATE(%r0), %r0     # get l_private
        addl2   $4, %sp                 # eat the code
        rei
1:
#endif
        pushl   $T_SYSCALL
        pushr   $0xfff
        mfpr    $PR_USP, -(%sp)
        mfpr    $PR_SSP, %r0            /* SSP contains curlwp */
        movl    L_PROC(%r0), %r0
        pushl   %ap
        pushl   %fp
        pushl   %sp             # pointer to syscall frame; defined in trap.h
        calls   $1, *P_MD_SYSCALL(%r0)
        movl    (%sp)+, %fp
        movl    (%sp)+, %ap
        mtpr    (%sp)+, $PR_USP
        popr    $0xfff
        addl2   $8, %sp
        mtpr    $IPL_HIGH, $PR_IPL      # Be sure we can REI
        rei
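
/*
 * The syscall stub above and Xtrap below build the same frame: trap
 * type and code, %r0-%r11 (pushr $0xfff), the user SP, %ap and %fp;
 * the final "pushl %sp" passes a pointer to that frame (the syscall/
 * trap frame laid out in trap.h) to the C handler, here reached
 * indirectly through the per-process md_syscall hook (P_MD_SYSCALL).
 * The "#if 1" block short-circuits SYS__lwp_getprivate entirely,
 * returning curlwp's l_private in %r0 without building a frame.
 */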
SCBENTRY(cmrerr)
        PUSHR
        movl    _C_LABEL(dep_call),%r0
        calls   $0,*MEMERR(%r0)
        POPR
        rei

SCBENTRY(sbiflt);
        pushab  sbifltmsg
        calls   $1, _C_LABEL(panic)

TRAPCALL(astintr, T_ASTFLT)

TRAPCALL(ddbtrap, T_KDBTRAP)

SCBENTRY(hardclock)
#ifdef DDB
        tstl    0x80000100              # rpb wait element
        beql    1f                      # set, jmp to debugger
        pushl   $0
        pushl   $T_KDBTRAP
        jbr     Xtrap
#endif
1:      pushr   $0x3f
        mfpr    $PR_ICCS,%r0
        tstl    %r0
        bgeq    2f
        incl    _C_LABEL(clock_misscnt)+EV_COUNT
        adwc    $0,_C_LABEL(clock_misscnt)+EV_COUNT+4
2:      mtpr    $0x800000c1,$PR_ICCS    # Reset interrupt flag
        incl    _C_LABEL(clock_intrcnt)+EV_COUNT        # count the number of clock interrupts
        adwc    $0,_C_LABEL(clock_intrcnt)+EV_COUNT+4

        mfpr    $PR_SSP, %r0            /* SSP contains curlwp */
        movl    L_CPU(%r0), %r0         /* get current CPU */
        incl    CI_NINTR(%r0)
        adwc    $0,(CI_NINTR+4)(%r0)

#if VAX46 || VAXANY
        cmpl    _C_LABEL(vax_boardtype),$VAX_BTYP_46
        bneq    1f
        movl    _C_LABEL(ka46_cpu),%r0
        clrl    VC_DIAGTIMM(%r0)
#endif
1:      pushl   %sp
        addl2   $24,(%sp)
        calls   $1,_C_LABEL(hardclock)
#if NLEDS
        calls   $0,_C_LABEL(leds_intr)
#endif
        popr    $0x3f
        rei

/*
 * Main routine for traps; all go through this.
 * Note that we put USP on the frame here, which sometimes should
 * be KSP to be correct, but since we only alter it when we are
 * called from user space it doesn't matter.
 * _sret is used in cpu_set_kpc to jump out to user space the first time.
 */
        .globl  _C_LABEL(sret)
Xtrap:  pushr   $0xfff
        mfpr    $PR_USP, -(%sp)
        pushl   %ap
        pushl   %fp
        pushl   %sp
        calls   $1, _C_LABEL(trap)
_C_LABEL(sret):
        movl    (%sp)+, %fp
        movl    (%sp)+, %ap
        mtpr    (%sp)+, $PR_USP
        popr    $0xfff
        addl2   $8, %sp
        mtpr    $IPL_HIGH, $PR_IPL      # Be sure we can REI
        rei

sbifltmsg:
        .asciz  "SBI fault"
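
/*
 * For reference on the table below: SCBENTRY(emulate) further down
 * indexes it with the sign-extended opcode byte plus 8, so opcodes
 * 0xf8-0xff land in slots 0-7 and 0x00-0x3b in slots 8-0x43; anything
 * outside that range, and every "noemulate" entry, ends up as a
 * reserved instruction fault.
 */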
#ifndef NO_INSN_EMULATE
/*
 * Table of emulated Microvax instructions supported by emulate.s.
 * Use noemulate to convert unimplemented ones to reserved instruction faults.
 */
        .globl  _C_LABEL(emtable)
_C_LABEL(emtable):
/* f8 */ .long _C_LABEL(EMashp); .long _C_LABEL(EMcvtlp)
/* fa */ .long noemulate; .long noemulate
/* fc */ .long noemulate; .long noemulate
/* fe */ .long noemulate; .long noemulate
/* 00 */ .long noemulate; .long noemulate
/* 02 */ .long noemulate; .long noemulate
/* 04 */ .long noemulate; .long noemulate
/* 06 */ .long noemulate; .long noemulate
/* 08 */ .long _C_LABEL(EMcvtps); .long _C_LABEL(EMcvtsp)
/* 0a */ .long noemulate; .long _C_LABEL(EMcrc)
/* 0c */ .long noemulate; .long noemulate
/* 0e */ .long noemulate; .long noemulate
/* 10 */ .long noemulate; .long noemulate
/* 12 */ .long noemulate; .long noemulate
/* 14 */ .long noemulate; .long noemulate
/* 16 */ .long noemulate; .long noemulate
/* 18 */ .long noemulate; .long noemulate
/* 1a */ .long noemulate; .long noemulate
/* 1c */ .long noemulate; .long noemulate
/* 1e */ .long noemulate; .long noemulate
/* 20 */ .long _C_LABEL(EMaddp4); .long _C_LABEL(EMaddp6)
/* 22 */ .long _C_LABEL(EMsubp4); .long _C_LABEL(EMsubp6)
/* 24 */ .long _C_LABEL(EMcvtpt); .long _C_LABEL(EMmulp)
/* 26 */ .long _C_LABEL(EMcvttp); .long _C_LABEL(EMdivp)
/* 28 */ .long noemulate; .long _C_LABEL(EMcmpc3)
/* 2a */ .long _C_LABEL(EMscanc); .long _C_LABEL(EMspanc)
/* 2c */ .long noemulate; .long _C_LABEL(EMcmpc5)
/* 2e */ .long _C_LABEL(EMmovtc); .long _C_LABEL(EMmovtuc)
/* 30 */ .long noemulate; .long noemulate
/* 32 */ .long noemulate; .long noemulate
/* 34 */ .long _C_LABEL(EMmovp); .long _C_LABEL(EMcmpp3)
/* 36 */ .long _C_LABEL(EMcvtpl); .long _C_LABEL(EMcmpp4)
/* 38 */ .long _C_LABEL(EMeditpc); .long _C_LABEL(EMmatchc)
/* 3a */ .long _C_LABEL(EMlocc); .long _C_LABEL(EMskpc)
#endif
/*
 * The following is called with the stack set up as follows:
 *
 *	  (%sp): Opcode
 *	 4(%sp): Instruction PC
 *	 8(%sp): Operand 1
 *	12(%sp): Operand 2
 *	16(%sp): Operand 3
 *	20(%sp): Operand 4
 *	24(%sp): Operand 5
 *	28(%sp): Operand 6
 *	32(%sp): Operand 7 (unused)
 *	36(%sp): Operand 8 (unused)
 *	40(%sp): Return PC
 *	44(%sp): Return PSL
 *	48(%sp): TOS before instruction
 *
 * Each individual routine is called with the stack set up as follows:
 *
 *	  (%sp): Return address of trap handler
 *	 4(%sp): Opcode (will get return PSL)
 *	 8(%sp): Instruction PC
 *	12(%sp): Operand 1
 *	16(%sp): Operand 2
 *	20(%sp): Operand 3
 *	24(%sp): Operand 4
 *	28(%sp): Operand 5
 *	32(%sp): Operand 6
 *	36(%sp): saved register 11
 *	40(%sp): saved register 10
 *	44(%sp): Return PC
 *	48(%sp): Return PSL
 *	52(%sp): TOS before instruction
 *
 * See the VAX Architecture Reference Manual, Section B-5 for more
 * information.
 */

SCBENTRY(emulate)
#ifndef NO_INSN_EMULATE
        movl    %r11,32(%sp)            # save register %r11 in unused operand
        movl    %r10,36(%sp)            # save register %r10 in unused operand
        cvtbl   (%sp),%r10              # get opcode
        addl2   $8,%r10                 # shift negative opcodes
        subl3   %r10,$0x43,%r11         # forget it if opcode is out of range
        bcs     noemulate
        movl    _C_LABEL(emtable)[%r10],%r10
                                        # call appropriate emulation routine
        jsb     (%r10)                  # routines put return values into regs 0-5
        movl    32(%sp),%r11            # restore register %r11
        movl    36(%sp),%r10            # restore register %r10
        insv    (%sp),$0,$4,44(%sp)     # and condition codes in Opcode spot
        addl2   $40,%sp                 # adjust stack for return
        rei
noemulate:
        addl2   $48,%sp                 # adjust stack for
#endif
        .word   0xffff                  # "reserved instruction fault"
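
/*
 * On the emulated path above, the emulation routine leaves its resulting
 * condition codes in the opcode slot (see "will get return PSL" in the
 * layout comment), insv merges their low four bits into the saved PSL,
 * and the addl2 $40 trims the opcode, instruction PC and eight operand
 * longwords so that only the return PC and PSL remain for the REI.
 */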