/* $NetBSD: exception.S,v 1.7 2023/10/06 11:45:16 skrll Exp $ */

/*-
 * Copyright (c) 2003,2004 Marcel Moolenaar
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
/* __FBSDID("$FreeBSD: releng/10.1/sys/ia64/ia64/exception.S 268200 2014-07-02 23:47:43Z marcel $"); */

#include "assym.h"

/*
 * Nested TLB restart tokens.  These are used by the
 * nested TLB handler for jumping back to the code
 * where the nested TLB fault was caused.
 */
#define	NTLBRT_SAVE	0x12c12c
#define	NTLBRT_RESTORE	0x12c12d

/*
 * ar.k7 = kernel memory stack
 * ar.k6 = kernel register stack
 * ar.k5 = EPC gateway page
 * ar.k4 = PCPU data
 */

	.section .ivt.data, "aw"

	.align	8
	.global	ia64_kptdir
	.size	ia64_kptdir, 8
ia64_kptdir:	data8	0


#ifdef XTRACE

	.align	8
	.global	ia64_xtrace_mask
	.size	ia64_xtrace_mask, 8
ia64_xtrace_mask:	data8	0

	.align	4
	.global	ia64_xtrace_enabled
	.size	ia64_xtrace_enabled, 4
ia64_xtrace_enabled:	data4	0

#define	XTRACE_HOOK(offset)			\
{ .mii ;					\
	nop 0 ;					\
	mov r31 = b7 ;				\
	mov r28 = pr ;				\
} ;						\
{ .mib ;					\
	nop 0 ;					\
	mov r25 = ip ;				\
	br.sptk ia64_xtrace_write ;;		\
} ;						\
{ .mii ;					\
	nop 0 ;					\
	mov b7 = r31 ;				\
	mov pr = r28, 0x1ffff ;;		\
}

	.section .ivt.text, "ax"

// We can only use r25, r26 & r27
ENTRY_NOPROFILE(ia64_xtrace_write, 0)
{ .mlx
	add r25 = 16, r25
	movl r26 = ia64_xtrace_enabled
	;;
}
{ .mmi
	mov r27 = ar.k3
	ld4 r26 = [r26]
	mov b7 = r25
	;;
}
{ .mib
	add r25 = -32, r25
	cmp.eq p15,p0 = r0, r26
	(p15) br.dptk.few b7
	;;
}
{ .mib
	nop 0
	cmp.eq p15,p0 = r0, r27
	(p15) br.dptk.few b7
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x00 IVT
	mov r26 = ar.itc
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r26, 8		// 0x08 ITC
	mov r25 = cr.iip
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x10 IIP
	mov r26 = cr.ifa
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r26, 8		// 0x18 IFA
	mov r25 = cr.isr
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x20 ISR
	mov r26 = cr.ipsr
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r26, 8		// 0x28 IPSR
	mov r25 = cr.itir
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x30 ITIR
	mov r26 = cr.iipa
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r26, 8		// 0x38 IIPA
	mov r25 = cr.ifs
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x40 IFS
	mov r26 = cr.iim
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r26, 8		// 0x48 IIM
	mov r25 = cr.iha
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x50 IHA
	mov r26 = ar.unat
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r26, 8		// 0x58 UNAT
	mov r25 = ar.rsc
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x60 RSC
	mov r26 = ar.bsp
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r26, 8		// 0x68 BSP
	mov r25 = r13
	nop 0
	;;
}
{ .mmi
	st8 [r27] = r25, 8		// 0x70 PCPU/TLS
	mov r26 = r12
	nop 0
	;;
}
{ .mlx
	st8 [r27] = r26, 8		// 0x78 SP
	movl r25 = ia64_xtrace_mask
	;;
}
{ .mmi
	ld8 r26 = [r25]
	;;
	and r25 = r27, r26
	nop 0
	;;
}
{ .mib
	mov ar.k3 = r25
	nop 0
	br.sptk b7
	;;
}
END(ia64_xtrace_write)
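
/*
 * For reference, each pass through ia64_xtrace_write appends one 0x80-byte
 * record at the buffer position kept in ar.k3, then advances that pointer
 * and masks it with ia64_xtrace_mask.  A minimal C sketch of the record
 * layout implied by the store offsets above (the struct and field names
 * are illustrative only; they are not defined by this file):
 *
 *	struct xtrace_record {
 *		uint64_t ivt;		// 0x00 IVT (vector entry, from ip)
 *		uint64_t itc;		// 0x08 ar.itc timestamp
 *		uint64_t iip;		// 0x10 cr.iip
 *		uint64_t ifa;		// 0x18 cr.ifa
 *		uint64_t isr;		// 0x20 cr.isr
 *		uint64_t ipsr;		// 0x28 cr.ipsr
 *		uint64_t itir;		// 0x30 cr.itir
 *		uint64_t iipa;		// 0x38 cr.iipa
 *		uint64_t ifs;		// 0x40 cr.ifs
 *		uint64_t iim;		// 0x48 cr.iim
 *		uint64_t iha;		// 0x50 cr.iha
 *		uint64_t unat;		// 0x58 ar.unat
 *		uint64_t rsc;		// 0x60 ar.rsc
 *		uint64_t bsp;		// 0x68 ar.bsp
 *		uint64_t tp;		// 0x70 r13 (PCPU/TLS)
 *		uint64_t sp;		// 0x78 r12
 *	};
 */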

#else /* XTRACE */

#define	XTRACE_HOOK(offset)

	.section .ivt.text, "ax"

#endif /* XTRACE */

/*
 * exception_save: save interrupted state
 *
 * Arguments:
 *	r16	address of bundle that contains the branch. The
 *		return address will be the next bundle.
 *	r17	the value to save as ifa in the trapframe. This
 *		normally is cr.ifa, but some interruptions set
 *		cr.iim and not cr.ifa.
 *
 * Returns:
 *	p15	interrupted from user stack
 *	p14	interrupted from kernel stack
 *	p13	interrupted from user backing store
 *	p12	interrupted from kernel backing store
 *	p11	interrupts were enabled
 *	p10	interrupts were disabled
 *
 * The layout of the trapframe built here is sketched in the comment
 * following END(exception_save) below.
 */
ENTRY_NOPROFILE(exception_save, 0)
{ .mii
	mov r20=ar.unat
	extr.u r31=sp,61,3
	mov r18=pr
	;;
}
{ .mmi
	cmp.le p14,p15=IA64_VM_MINKERN_REGION,r31
	;;
	(p15) mov r23=ar.k7		// kernel memory stack
	(p14) mov r23=sp
	;;
}
{ .mii
	mov r21=ar.rsc
	add r30=-SIZEOF_TRAPFRAME,r23
	;;
	dep r30=0,r30,0,10
	;;
}
{ .mmi
	mov ar.rsc=0
	mov r22=cr.iip
	addl r29=NTLBRT_SAVE,r0		// 22-bit restart token.
	;;
}

/*
 * We have a 1KB aligned trapframe, pointed to by r30. We can't
 * reliably write to the trapframe using virtual addressing, due
 * to the fact that TC entries we depend on can be removed by:
 * 1.  ptc.g instructions issued by other threads/cores/CPUs, or
 * 2.  TC modifications in another thread on the same core.
 * When our TC entry gets removed, we get nested TLB faults and
 * since no state is saved, we can only deal with those when
 * explicitly coded and expected.
 * As such, we switch to physical addressing and account for the
 * fact that the tpa instruction can cause a nested TLB fault.
 * Since the data nested TLB fault does not preserve any state,
 * we have to be careful what we clobber. Consequently, we have
 * to be careful what we use here. Below is a list of registers
 * that are considered alive:
 *	r16,r17=arguments
 *	r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
 *	r29=restart token
 *	r30=trapframe pointer
 *	p14,p15=memory stack switch
 */
exception_save_restart:
	tpa r24=r30			// Nested TLB fault possible
	sub r19=r23,r30
	nop 0
	;;

	rsm psr.dt
	add r29=16,r19			// Clobber restart token
	mov r30=r24
	;;
	srlz.d
	add r31=8,r24
	;;

	// r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
	// r29=delta
{ .mmi
	st8 [r30]=r19,16		// length
	st8 [r31]=r0,16			// flags
	;;
}
{ .mmi
	st8.spill [r30]=sp,16		// sp
	st8 [r31]=r20,16		// unat
	sub sp=r23,r29
	;;
}
{ .mmi
	mov r19=ar.rnat
	mov r20=ar.bspstore
	mov r23=rp
	;;
}
	// r18=pr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=rp
	// r24=pfs
{ .mmi
	st8 [r30]=r23,16		// rp
	st8 [r31]=r18,16		// pr
	mov r24=ar.pfs
	;;
}
{ .mmb
	st8 [r30]=r24,16		// pfs
	st8 [r31]=r20,16		// bspstore
	cover
	;;
}
{ .mmi
	mov r18=ar.fpsr
	mov r23=cr.ipsr
	extr.u r24=r20,61,3
	;;
}
	// r18=fpsr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=ipsr
{ .mmi
	st8 [r30]=r19,16		// rnat
	st8 [r31]=r0,16			// __spare
	cmp.le p12,p13=IA64_VM_MINKERN_REGION,r24
	;;
}
{ .mmi
	st8.spill [r30]=r13,16		// tp
	st8 [r31]=r21,16		// rsc
	tbit.nz p11,p10=r23,14		// p11=interrupts enabled
	;;
}
{ .mmi
	(p13) mov r21=ar.k6		// kernel register stack
	;;
	st8 [r30]=r18,16		// fpsr
	(p13) dep r20=r20,r21,0,9	// align dirty registers
	;;
}
	// r19=rnat, r20=bspstore, r22=iip, r23=ipsr
{ .mmi
	st8 [r31]=r23,16		// psr
	(p13) mov ar.bspstore=r20
	nop 0
	;;
}
{ .mmi
	(p13) mov ar.rnat=r19
	mov r18=ar.bsp
	nop 0
	;;
}
{ .mmi
	mov r19=cr.ifs
	st8.spill [r30]=gp,16		// gp
	sub r18=r18,r20
	;;
}
	// r18=ndirty, r19=ifs, r22=iip
{ .mmi
	st8 [r31]=r18,16		// ndirty
	st8 [r30]=r19,16		// cfm
	nop 0
	;;
}
{ .mmi
	mov r18=cr.isr
	st8 [r31]=r22,16		// iip
	add r29=16,r30
	;;
}
{ .mmi
	st8 [r30]=r17,24		// ifa
	st8 [r31]=r18,24		// isr
	nop 0
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r30]=r2,16		// r2
	.mem.offset 8,0
	st8.spill [r31]=r3,16		// r3
	add r2=9*8,r29
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r30]=r8,16		// r8
	.mem.offset 8,0
	st8.spill [r31]=r9,16		// r9
	add r3=8,r2
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r30]=r10,16		// r10
	.mem.offset 8,0
	st8.spill [r31]=r11,16		// r11
	add r8=16,r16
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r30]=r14		// r14
	.mem.offset 8,0
	st8.spill [r31]=r15		// r15
	mov r9=r29
}
{ .mmb
	mov r10=ar.csd
	mov r11=ar.ssd
	bsw.1
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r2]=r16,16		// r16
	.mem.offset 8,0
	st8.spill [r3]=r17,16		// r17
	mov r14=b6
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r2]=r18,16		// r18
	.mem.offset 8,0
	st8.spill [r3]=r19,16		// r19
	mov r15=b7
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r2]=r20,16		// r20
	.mem.offset 8,0
	st8.spill [r3]=r21,16		// r21
	mov b7=r8
	;;
}
{ .mmi
	.mem.offset 0,0
	st8.spill [r2]=r22,16		// r22
	.mem.offset 8,0
	st8.spill [r3]=r23,16		// r23
	;;
}

	.mem.offset 0,0
	st8.spill [r2]=r24,16		// r24
	.mem.offset 8,0
	st8.spill [r3]=r25,16		// r25
	;;
	.mem.offset 0,0
	st8.spill [r2]=r26,16		// r26
	.mem.offset 8,0
	st8.spill [r3]=r27,16		// r27
	;;
	.mem.offset 0,0
	st8.spill [r2]=r28,16		// r28
	.mem.offset 8,0
	st8.spill [r3]=r29,16		// r29
	;;
	.mem.offset 0,0
	st8.spill [r2]=r30,16		// r30
	.mem.offset 8,0
	st8.spill [r3]=r31,16		// r31
	;;

{ .mmi
	st8 [r2]=r14,16			// b6
	mov r17=ar.unat
	nop 0
	;;
}
{ .mmi
	st8 [r3]=r15,16			// b7
	mov r16=ar.ccv
	nop 0
	;;
}
{ .mmi
	st8 [r2]=r16,16			// ccv
	st8 [r3]=r10,16			// csd
	nop 0
	;;
}
{ .mmi
	st8 [r2]=r11,24			// ssd
	st8 [r9]=r17
	nop 0
	;;
}

	stf.spill [r3]=f6,32		// f6
	stf.spill [r2]=f7,32		// f7
	;;
	stf.spill [r3]=f8,32		// f8
	stf.spill [r2]=f9,32		// f9
	;;
	stf.spill [r3]=f10,32		// f10
	stf.spill [r2]=f11,32		// f11
	;;
	stf.spill [r3]=f12,32		// f12
	stf.spill [r2]=f13,32		// f13
	;;
	stf.spill [r3]=f14		// f14
	stf.spill [r2]=f15		// f15
	;;
{ .mmi
	mov ar.rsc=3
	mov r13=ar.k4
	nop 0
	;;
}
{ .mlx
	ssm psr.dt|psr.ic|psr.dfh
	movl gp=__gp
	;;
}
{ .mib
	srlz.d
	nop 0
	br.sptk b7
	;;
}
END(exception_save)
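
/*
 * For orientation, the stores above lay out the fixed ("special") part of
 * the trapframe as follows.  This is a sketch only, to make the offsets
 * easier to follow; the authoritative definition is struct trapframe in
 * the machine-dependent headers and the offsets generated into assym.h:
 *
 *	0x00 length	0x08 flags
 *	0x10 sp		0x18 unat (before spilling)
 *	0x20 rp		0x28 pr
 *	0x30 pfs	0x38 bspstore
 *	0x40 rnat	0x48 __spare
 *	0x50 tp (r13)	0x58 rsc
 *	0x60 fpsr	0x68 psr (cr.ipsr)
 *	0x70 gp (r1)	0x78 ndirty
 *	0x80 cfm	0x88 iip
 *	0x90 ifa	0x98 isr
 *
 * The post-spill ar.unat, the scratch registers (r2-r3, r8-r11, r14-r15,
 * r16-r31), b6/b7, ar.ccv, ar.csd, ar.ssd and f6-f15 follow.
 */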

/*
 * exception_restore: restore interrupted state
 *
 * Arguments:
 *	sp+16	trapframe pointer
 */
ENTRY_NOPROFILE(exception_restore, 0)
{ .mmi
	rsm psr.i
	add sp=16,sp
	nop 0
	;;
}

	// The next instruction can fault. Let it be...
	tpa r9=sp
	;;
	rsm psr.dt|psr.ic
	add r8=SIZEOF_SPECIAL+16,r9
	;;
	srlz.d
	add r2=SIZEOF_TRAPFRAME-16,r9
	add r3=SIZEOF_TRAPFRAME-32,r9
	;;

{ .mmi
	ldf.fill f15=[r2],-32		// f15
	ldf.fill f14=[r3],-32		// f14
	nop 0
	;;
}
{ .mmi
	ldf.fill f13=[r2],-32		// f13
	ldf.fill f12=[r3],-32		// f12
	nop 0
	;;
}
{ .mmi
	ldf.fill f11=[r2],-32		// f11
	ldf.fill f10=[r3],-32		// f10
	nop 0
	;;
}
{ .mmi
	ldf.fill f9=[r2],-32		// f9
	ldf.fill f8=[r3],-32		// f8
	nop 0
	;;
}
{ .mmi
	ldf.fill f7=[r2],-24		// f7
	ldf.fill f6=[r3],-16		// f6
	nop 0
	;;
}
{ .mmi
	ld8 r8=[r8]			// unat (after)
	;;
	mov ar.unat=r8
	nop 0
	;;
}

	ld8 r10=[r2],-16		// ssd
	ld8 r11=[r3],-16		// csd
	;;
	mov ar.ssd=r10
	mov ar.csd=r11

	ld8 r14=[r2],-16		// ccv
	ld8 r15=[r3],-16		// b7
	;;

{ .mmi
	mov ar.ccv=r14
	ld8 r8=[r2],-16			// b6
	mov b7=r15
	;;
}
{ .mmi
	ld8.fill r31=[r3],-16		// r31
	ld8.fill r30=[r2],-16		// r30
	mov b6=r8
	;;
}

	ld8.fill r29=[r3],-16		// r29
	ld8.fill r28=[r2],-16		// r28
	;;
	ld8.fill r27=[r3],-16		// r27
	ld8.fill r26=[r2],-16		// r26
	;;
	ld8.fill r25=[r3],-16		// r25
	ld8.fill r24=[r2],-16		// r24
	;;
	ld8.fill r23=[r3],-16		// r23
	ld8.fill r22=[r2],-16		// r22
	;;
	ld8.fill r21=[r3],-16		// r21
	ld8.fill r20=[r2],-16		// r20
	;;
	ld8.fill r19=[r3],-16		// r19
	ld8.fill r18=[r2],-16		// r18
	;;

{ .mmb
	ld8.fill r17=[r3],-16		// r17
	ld8.fill r16=[r2],-16		// r16
	bsw.0
	;;
}
{ .mii
	ld8 r16=[r9]			// tf_length
	add r31=16,r9
	add r30=24,r9
}
{ .mmi
	ld8.fill r15=[r3],-16		// r15
	ld8.fill r14=[r2],-16		// r14
	nop 0
	;;
}
{ .mmi
	ld8.fill r11=[r3],-16		// r11
	ld8.fill r10=[r2],-16		// r10
	add r16=r16,sp			// ar.k7
	;;
}
{ .mmi
	ld8.fill r9=[r3],-16		// r9
	ld8.fill r8=[r2],-16		// r8
	nop 0
	;;
}
{ .mmi
	ld8.fill r3=[r3]		// r3
	ld8.fill r2=[r2]		// r2
	nop 0
	;;
}

	ld8.fill sp=[r31],16		// sp
	ld8 r17=[r30],16		// unat
	;;
	ld8 r29=[r31],16		// rp
	ld8 r18=[r30],16		// pr
	;;
	ld8 r28=[r31],16		// pfs
	ld8 r20=[r30],24		// bspstore
	mov rp=r29
	;;
	ld8 r21=[r31],24		// rnat
	mov ar.pfs=r28
	;;
	ld8.fill r26=[r30],16		// tp
	ld8 r22=[r31],16		// rsc
	;;

{ .mmi
	ld8 r23=[r30],16		// fpsr
	ld8 r24=[r31],16		// psr
	extr.u r28=r20,61,3
	;;
}
{ .mmi
	ld8.fill r1=[r30],16		// gp
	ld8 r27=[r31],16		// ndirty
	cmp.le p14,p15=IA64_VM_MINKERN_REGION,r28
	;;
}
{ .mmi
	ld8 r25=[r30]			// cfm
	ld8 r19=[r31]			// ip
	nop 0
	;;
}
{ .mii
	// Switch register stack
	alloc r30=ar.pfs,0,0,0,0	// discard current frame
	shl r31=r27,16			// value for ar.rsc
	(p15) mov r13=r26
	;;
}
	// The loadrs can fault if the backing store is not currently
	// mapped. We assure forward progress by having fetched everything
	// we need from the trapframe, so we don't care if the CPU purges
	// that translation when it needs to insert a new one for the
	// backing store.
{ .mmi
	mov ar.rsc=r31			// setup for loadrs
	mov ar.k7=r16
	addl r29=NTLBRT_RESTORE,r0	// 22-bit restart token
	;;
}

	ssm psr.dt
	;;
	srlz.d
	mov r16 = r25

exception_restore_restart:
{ .mmi
	mov r30=ar.bspstore
	;;
	loadrs				// load user regs
	mov r29=0			// Clobber restart token
	;;
}
{ .mmi
	mov r31=ar.bspstore
	;;
	mov ar.bspstore=r20
	dep r31=0,r31,0,13		// 8KB aligned
	;;
}
{ .mmi
	mov cr.ifs=r16
	mov ar.k6=r31
	mov pr=r18,0x1ffff
	;;
}
{ .mmi
	mov cr.iip=r19
	mov ar.unat=r17
	nop 0
	;;
}
{ .mmi
	mov cr.ipsr=r24
	mov ar.rnat=r21
	nop 0
	;;
}
{ .mmb
	mov ar.rsc=r22
	mov ar.fpsr=r23
	rfi
	;;
}
END(exception_restore)

/*
 * Call exception_save to preserve the interrupted state in a
 * trapframe. Note that we don't use a call instruction because we
 * must be careful not to lose track of the RSE state. We then call
 * trap() with the value of _n_ as an argument to handle the
 * exception. We arrange for trap() to return to exception_restore
 * which will restore the interrupted state before executing an rfi to
 * resume it.
 */
#define	CALL(_func_, _n_, _ifa_)		\
{ .mib ;					\
	mov r17=_ifa_ ;				\
	mov r16=ip ;				\
	br.sptk exception_save ;;		\
} ;						\
{ .mmi ;					\
	alloc r15=ar.pfs,0,0,2,0 ;;		\
	(p11) ssm psr.i ;			\
	mov out0=_n_ ;;				\
} ;						\
{ .mib ;					\
	(p11) srlz.d ;				\
	add out1=16,sp ;			\
	br.call.sptk rp=_func_ ;;		\
} ;						\
{ .mib ;					\
	nop 0 ;					\
	nop 0 ;					\
	br.sptk exception_restore ;;		\
}
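
/*
 * In C terms, a CALL(_func_, _n_, _ifa_) expansion amounts to roughly the
 * following (a sketch only; the actual prototypes live in the C sources):
 *
 *	struct trapframe *tf = (struct trapframe *)(sp + 16);
 *	exception_save();	// not a real call; builds *tf
 *	if (p11)		// interrupted context had interrupts enabled
 *		ssm(psr.i);
 *	_func_(_n_, tf);	// trap(), or IA32_TRAP for the IA-32 vectors
 *	exception_restore();	// never returns; ends in rfi
 */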

#define	IVT_ENTRY(name, offset)			\
	.org	ia64_vector_table + offset;	\
	.global	ivt_##name;			\
	.proc	ivt_##name;			\
	.prologue;				\
	.unwabi	@svr4, 'I';			\
	.save	rp, r0;				\
	.body;					\
ivt_##name:					\
	XTRACE_HOOK(offset)

#define	IVT_END(name)				\
	.endp	ivt_##name

#ifdef COMPAT_IA32
#define	IA32_TRAP	ia32_trap
#else
#define	IA32_TRAP	trap
#endif

/*
 * The IA64 Interrupt Vector Table (IVT) contains 20 slots with 64
 * bundles per vector and 48 slots with 16 bundles per vector.
 */

	.section .ivt, "ax"

	.align	32768
	.global	ia64_vector_table
	.size	ia64_vector_table, 32768
ia64_vector_table:

IVT_ENTRY(VHPT_Translation, 0x0000)
	CALL(trap, 0, cr.ifa)
IVT_END(VHPT_Translation)

IVT_ENTRY(Instruction_TLB, 0x0400)
	mov r16=cr.ifa
	mov r17=pr
	;;
	thash r18=r16
	ttag r19=r16
	;;
	add r21=16,r18			// tag
	add r20=24,r18			// collision chain
	;;
	ld8 r21=[r21]			// check VHPT tag
	ld8 r20=[r20]			// bucket head
	;;
	cmp.ne p15,p0=r21,r19
	(p15) br.dpnt.few 1f
	;;
	ld8 r21=[r18]			// read pte
	;;
	itc.i r21			// insert pte
	mov pr=r17,0x1ffff
	;;
	rfi				// done
	;;
1:	rsm psr.dt			// turn off data translations
	dep r20=0,r20,61,3		// convert vhpt ptr to physical
	;;
	srlz.d				// serialize
	ld8 r20=[r20]			// first entry
	;;
2:	cmp.eq p15,p0=r0,r20		// done?
	(p15) br.cond.spnt.few 9f	// bail if done
	;;
	add r21=16,r20			// tag location
	;;
	ld8 r21=[r21]			// read tag
	;;
	cmp.ne p15,p0=r21,r19		// compare tags
	(p15) br.cond.sptk.few 3f	// if not, read next in chain
	;;
	ld8 r21=[r20]			// read pte
	mov r22=PTE_ACCESSED
	;;
	or r21=r21,r22
	;;
	st8 [r20]=r21,8
	;;
	ld8 r22=[r20]			// read rest of pte
	;;
	dep r18=0,r18,61,3		// convert vhpt ptr to physical
	;;
	add r20=16,r18			// address of tag
	;;
	ld8.acq r23=[r20]		// read old tag
	;;
	dep r23=-1,r23,63,1		// set ti bit
	;;
	st8.rel [r20]=r23		// store old tag + ti
	;;
	mf				// make sure everyone sees
	;;
	st8 [r18]=r21,8			// store pte
	;;
	st8 [r18]=r22,8
	;;
	st8.rel [r18]=r19		// store new tag
	;;
	itc.i r21			// and place in TLB
	ssm psr.dt
	;;
	srlz.d
	mov pr=r17,0x1ffff		// restore predicates
	rfi
	;;
3:	add r20=24,r20			// next in chain
	;;
	ld8 r20=[r20]			// read chain
	br.sptk 2b			// loop
	;;
9:	ssm psr.dt
	mov pr=r17,0x1ffff		// restore predicates
	;;
	srlz.d
	;;
	CALL(trap, 20, cr.ifa)		// Page Not Present trap
IVT_END(Instruction_TLB)
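
/*
 * The fast path above inserts the VHPT entry's own pte when its tag already
 * matches; the slow path (shared, with minor variations, by the Data_TLB,
 * Dirty_Bit and Access_Bit handlers below) walks the collision chain.  A
 * rough C sketch, where the struct mirrors the 32-byte long-format entry
 * implied by the +0/+8/+16/+24 offsets and phys() stands for the
 * "dep ...,61,3" region-bit stripping (names are illustrative, not taken
 * from the headers):
 *
 *	struct lpte {
 *		uint64_t pte;		// +0  translation and PTE_* bits
 *		uint64_t itir;		// +8  rest of the pte
 *		uint64_t tag;		// +16 ttag(va); bit 63 is the ti bit
 *		uint64_t chain;		// +24 collision chain link
 *	};
 *
 *	struct lpte *vhpt = thash(va);
 *	struct lpte *p = *(struct lpte **)phys(vhpt->chain);	// bucket head
 *	while (p != NULL && p->tag != ttag(va))
 *		p = (struct lpte *)p->chain;
 *	if (p == NULL)
 *		return trap(20, ...);		// page not present
 *	p->pte |= PTE_ACCESSED;			// plus PTE_DIRTY in Dirty_Bit
 *	vhpt->tag |= 1UL << 63;			// invalidate during the update
 *	vhpt->pte = p->pte;
 *	vhpt->itir = p->itir;
 *	vhpt->tag = ttag(va);			// re-validate
 *	itc(p->pte);				// insert into the TLB, then rfi
 */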

IVT_ENTRY(Data_TLB, 0x0800)
	mov r16=cr.ifa
	mov r17=pr
	;;
	thash r18=r16
	ttag r19=r16
	;;
	add r21=16,r18			// tag
	add r20=24,r18			// collision chain
	;;
	ld8 r21=[r21]			// check VHPT tag
	ld8 r20=[r20]			// bucket head
	;;
	cmp.ne p15,p0=r21,r19
	(p15) br.dpnt.few 1f
	;;
	ld8 r21=[r18]			// read pte
	;;
	itc.d r21			// insert pte
	mov pr=r17,0x1ffff
	;;
	rfi				// done
	;;
1:	rsm psr.dt			// turn off data translations
	dep r20=0,r20,61,3		// convert vhpt ptr to physical
	;;
	srlz.d				// serialize
	ld8 r20=[r20]			// first entry
	;;
2:	cmp.eq p15,p0=r0,r20		// done?
	(p15) br.cond.spnt.few 9f	// bail if done
	;;
	add r21=16,r20			// tag location
	;;
	ld8 r21=[r21]			// read tag
	;;
	cmp.ne p15,p0=r21,r19		// compare tags
	(p15) br.cond.sptk.few 3f	// if not, read next in chain
	;;
	ld8 r21=[r20]			// read pte
	mov r22=PTE_ACCESSED
	;;
	or r21=r21,r22
	;;
	st8 [r20]=r21,8
	;;
	ld8 r22=[r20]			// read rest of pte
	;;
	dep r18=0,r18,61,3		// convert vhpt ptr to physical
	;;
	add r20=16,r18			// address of tag
	;;
	ld8.acq r23=[r20]		// read old tag
	;;
	dep r23=-1,r23,63,1		// set ti bit
	;;
	st8.rel [r20]=r23		// store old tag + ti
	;;
	mf				// make sure everyone sees
	;;
	st8 [r18]=r21,8			// store pte
	;;
	st8 [r18]=r22,8
	;;
	st8.rel [r18]=r19		// store new tag
	;;
	itc.d r21			// and place in TLB
	ssm psr.dt
	;;
	srlz.d
	mov pr=r17,0x1ffff		// restore predicates
	rfi
	;;
3:	add r20=24,r20			// next in chain
	;;
	ld8 r20=[r20]			// read chain
	br.sptk 2b			// loop
	;;
9:	ssm psr.dt
	mov pr=r17,0x1ffff		// restore predicates
	;;
	srlz.d
	;;
	CALL(trap, 20, cr.ifa)		// Page Not Present trap
IVT_END(Data_TLB)

IVT_ENTRY(Alternate_Instruction_TLB, 0x0c00)
	mov r16=cr.ifa			// where did it happen
	mov r18=pr			// save predicates
	;;
	extr.u r17=r16,61,3		// get region number
	mov r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX
	;;
	cmp.eq p13,p0=IA64_PBVM_RR,r17	// RR4?
	(p13) br.cond.sptk.few 4f
	;;
	cmp.ge p13,p0=5,r17		// RR0-RR5?
	cmp.eq p14,p15=7,r17		// RR7?
	(p13) br.cond.spnt.few 9f
	;;
	(p14) add r19=PTE_MA_WB,r19
	(p15) add r19=PTE_MA_UC,r19
	dep r17=0,r16,50,14		// clear bits above PPN
	;;
1:	dep r16=r19,r17,0,12		// put pte bits in 0..11
	;;
	itc.i r16
	mov pr=r18,0x1ffff		// restore predicates
	;;
	rfi
	;;
4:
	add r19=PTE_MA_WB,r19
	movl r17=IA64_PBVM_BASE
	;;
	sub r17=r16,r17
	movl r16=IA64_PBVM_PGTBL
	;;
	extr.u r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT
	;;
	shladd r16=r17,3,r16
	;;
	ld8 r17=[r16]
	br.sptk 1b
	;;
9:	mov pr=r18,0x1ffff		// restore predicates
	CALL(trap, 3, cr.ifa)
IVT_END(Alternate_Instruction_TLB)

IVT_ENTRY(Alternate_Data_TLB, 0x1000)
	mov r16=cr.ifa			// where did it happen
	mov r18=pr			// save predicates
	;;
	extr.u r17=r16,61,3		// get region number
	mov r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX
	;;
	cmp.eq p13,p0=IA64_PBVM_RR,r17	// RR4?
	(p13) br.cond.sptk.few 4f
	;;
	cmp.ge p13,p0=5,r17		// RR0-RR5?
	cmp.eq p14,p15=7,r17		// RR7?
	(p13) br.cond.spnt.few 9f
	;;
	(p14) add r19=PTE_MA_WB,r19
	(p15) add r19=PTE_MA_UC,r19
	dep r17=0,r16,50,14		// clear bits above PPN
	;;
1:	dep r16=r19,r17,0,12		// put pte bits in 0..11
	;;
	itc.d r16
	mov pr=r18,0x1ffff		// restore predicates
	;;
	rfi
	;;
4:
	add r19=PTE_MA_WB,r19
	movl r17=IA64_PBVM_BASE
	;;
	sub r17=r16,r17
	movl r16=IA64_PBVM_PGTBL
	;;
	extr.u r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT
	;;
	shladd r16=r17,3,r16
	;;
	ld8 r17=[r16]
	br.sptk 1b
	;;
9:	mov pr=r18,0x1ffff		// restore predicates
	CALL(trap, 4, cr.ifa)
IVT_END(Alternate_Data_TLB)

IVT_ENTRY(Data_Nested_TLB, 0x1400)
	// See exception_save_restart and exception_restore_restart for the
	// contexts that may cause a data nested TLB fault. We can only use
	// the banked general registers and predicates, but don't use:
	//	p14 & p15	- Set in exception save
	//	r16 & r17	- Arguments to exception save
	//	r30		- Faulting address (modulo page size)
	// We assume r30 has the virtual address that relates to the data
	// nested TLB fault. The address does not have to be exact, as long
	// as it's in the same page. We use physical addressing to avoid
	// double nested faults. Since all virtual addresses we encounter
	// here are direct mapped region 7 addresses, we have no problem
	// constructing physical addresses.
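	//
	// A rough C sketch of the two cases handled below, after cr.ifa and
	// cr.itir have been set up from r30 and its region register (phys()
	// again stands for clearing the region bits; the helper names are
	// illustrative, not taken from the pmap headers):
	//
	//	if (REGION(r30) == 7) {
	//		// direct-mapped region 7: identity pte for the page
	//		pte = (r30 & ~0xfffUL) | PTE_PRESENT | PTE_ACCESSED |
	//		    PTE_DIRTY | PTE_PL_KERN | PTE_AR_RWX | PTE_MA_WB;
	//	} else {
	//		// walk the kernel page table rooted at ia64_kptdir
	//		l0 = (r30 >> (3*PAGE_SHIFT - 8)) &
	//		    ((1 << (PAGE_SHIFT - 3)) - 1);
	//		l1 = (r30 >> (2*PAGE_SHIFT - 5)) &
	//		    ((1 << (PAGE_SHIFT - 3)) - 1);
	//		idx = (r30 >> PAGE_SHIFT) &
	//		    ((1 << (PAGE_SHIFT - 5)) - 1);
	//		l0dir = phys(ia64_kptdir);
	//		l1dir = phys(l0dir[l0]);
	//		lpte = phys(l1dir[l1]) + 32 * idx; // 32-byte entries
	//		*lpte |= PTE_DIRTY | PTE_ACCESSED;
	//		pte = *lpte;
	//	}
	//	itc_d(pte);	// insert, then honor the NTLBRT_* restart
	//			// tokens in r29 (or fall back to trap(5))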

{ .mmi
	mov cr.ifa=r30
	mov r26=rr[r30]
	extr.u r27=r30,61,3
	;;
}
{ .mii
	nop 0
	dep r26=0,r26,0,2
	cmp.eq p12,p13=7,r27
	;;
}
{ .mii
	mov cr.itir=r26
	(p12) dep r28=0,r30,0,12
	(p13) extr.u r28=r30,3*PAGE_SHIFT-8, PAGE_SHIFT-3	// dir L0 index
	;;
}
{ .mlx
	(p12) add r28=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX+PTE_MA_WB,r28
	(p13) movl r27=ia64_kptdir
	;;
}
{ .mib
	(p13) ld8 r27=[r27]
	(p13) extr.u r26=r30,2*PAGE_SHIFT-5, PAGE_SHIFT-3	// dir L1 index
	(p12) br.cond.spnt.few 1f
	;;
}
{ .mmi
	rsm psr.dt
	;;
	srlz.d
	dep r27=0,r27,61,3
	;;
}
{ .mmi
	shladd r27=r28,3,r27
	;;
	ld8 r27=[r27]			// dir L1 page
	extr.u r28=r30,PAGE_SHIFT,PAGE_SHIFT-5	// pte index
	;;
}
{ .mii
	shladd r27=r26,3,r27
	shl r28=r28,5
	;;
	dep r27=0,r27,61,3
	;;
}
	ld8 r27=[r27]			// pte page
	;;
	add r27=r28,r27
	;;
	dep r27=0,r27,61,3
	;;
	ld8 r28=[r27]			// pte
	;;
	or r28=PTE_DIRTY+PTE_ACCESSED,r28
	;;
	st8 [r27]=r28
	;;
	ssm psr.dt
	;;
1:
{ .mmi
	itc.d r28
	;;
	addl r26=NTLBRT_SAVE,r0
	addl r27=NTLBRT_RESTORE,r0
	;;
}
{ .mmi
	srlz.d
	cmp.eq p12,p0=r29,r26
	cmp.eq p13,p0=r29,r27
	;;
}
{ .mbb
	nop 0
	(p12) br.cond.sptk.few exception_save_restart
	(p13) br.cond.sptk.few exception_restore_restart
	;;
}

{ .mlx
	mov r26=ar.bsp
	movl r29=kstack
	;;
}
{ .mlx
	mov r28=sp
	movl r27=kstack_top
	;;
}
{ .mmi
	add sp=-16,r27
	;;
	mov r27=ar.bspstore
	nop 0
	;;
}
	mov ar.rsc=0
	dep r29=r27,r29,0,9
	;;
	mov ar.bspstore=r29
	;;
	CALL(trap, 5, r30)
IVT_END(Data_Nested_TLB)

IVT_ENTRY(Instruction_Key_Miss, 0x1800)
	CALL(trap, 6, cr.ifa)
IVT_END(Instruction_Key_Miss)

IVT_ENTRY(Data_Key_Miss, 0x1c00)
	CALL(trap, 7, cr.ifa)
IVT_END(Data_Key_Miss)

IVT_ENTRY(Dirty_Bit, 0x2000)
	mov r16=cr.ifa
	mov r17=pr
	;;
	thash r18=r16
	;;
	ttag r19=r16
	add r20=24,r18			// collision chain
	;;
	ld8 r20=[r20]			// bucket head
	;;
	rsm psr.dt			// turn off data translations
	dep r20=0,r20,61,3		// convert vhpt ptr to physical
	;;
	srlz.d				// serialize
	ld8 r20=[r20]			// first entry
	;;
1:	cmp.eq p15,p0=r0,r20		// done?
	(p15) br.cond.spnt.few 9f	// bail if done
	;;
	add r21=16,r20			// tag location
	;;
	ld8 r21=[r21]			// read tag
	;;
	cmp.ne p15,p0=r21,r19		// compare tags
	(p15) br.cond.sptk.few 2f	// if not, read next in chain
	;;
	ld8 r21=[r20]			// read pte
	mov r22=PTE_DIRTY+PTE_ACCESSED
	;;
	or r21=r22,r21			// set dirty & access bit
	;;
	st8 [r20]=r21,8			// store back
	;;
	ld8 r22=[r20]			// read rest of pte
	;;
	dep r18=0,r18,61,3		// convert vhpt ptr to physical
	;;
	add r20=16,r18			// address of tag
	;;
	ld8.acq r23=[r20]		// read old tag
	;;
	dep r23=-1,r23,63,1		// set ti bit
	;;
	st8.rel [r20]=r23		// store old tag + ti
	;;
	mf				// make sure everyone sees
	;;
	st8 [r18]=r21,8			// store pte
	;;
	st8 [r18]=r22,8
	;;
	st8.rel [r18]=r19		// store new tag
	;;
	itc.d r21			// and place in TLB
	ssm psr.dt
	;;
	srlz.d
	mov pr=r17,0x1ffff		// restore predicates
	rfi
	;;
2:	add r20=24,r20			// next in chain
	;;
	ld8 r20=[r20]			// read chain
	br.sptk 1b			// loop
	;;
9:	ssm psr.dt
	mov pr=r17,0x1ffff		// restore predicates
	;;
	srlz.d
	;;
	CALL(trap, 8, cr.ifa)		// die horribly
IVT_END(Dirty_Bit)

IVT_ENTRY(Instruction_Access_Bit, 0x2400)
	mov r16=cr.ifa
	mov r17=pr
	;;
	thash r18=r16
	;;
	ttag r19=r16
	add r20=24,r18			// collision chain
	;;
	ld8 r20=[r20]			// bucket head
	;;
	rsm psr.dt			// turn off data translations
	dep r20=0,r20,61,3		// convert vhpt ptr to physical
	;;
	srlz.d				// serialize
	ld8 r20=[r20]			// first entry
	;;
1:	cmp.eq p15,p0=r0,r20		// done?
	(p15) br.cond.spnt.few 9f	// bail if done
	;;
	add r21=16,r20			// tag location
	;;
	ld8 r21=[r21]			// read tag
	;;
	cmp.ne p15,p0=r21,r19		// compare tags
	(p15) br.cond.sptk.few 2f	// if not, read next in chain
	;;
	ld8 r21=[r20]			// read pte
	mov r22=PTE_ACCESSED
	;;
	or r21=r22,r21			// set accessed bit
	;;
	st8 [r20]=r21,8			// store back
	;;
	ld8 r22=[r20]			// read rest of pte
	;;
	dep r18=0,r18,61,3		// convert vhpt ptr to physical
	;;
	add r20=16,r18			// address of tag
	;;
	ld8.acq r23=[r20]		// read old tag
	;;
	dep r23=-1,r23,63,1		// set ti bit
	;;
	st8.rel [r20]=r23		// store old tag + ti
	;;
	mf				// make sure everyone sees
	;;
	st8 [r18]=r21,8			// store pte
	;;
	st8 [r18]=r22,8
	;;
	st8.rel [r18]=r19		// store new tag
	;;
	itc.i r21			// and place in TLB
	ssm psr.dt
	;;
	srlz.d
	mov pr=r17,0x1ffff		// restore predicates
	rfi				// walker will retry the access
	;;
2:	add r20=24,r20			// next in chain
	;;
	ld8 r20=[r20]			// read chain
	br.sptk 1b			// loop
	;;
9:	ssm psr.dt
	mov pr=r17,0x1ffff		// restore predicates
	;;
	srlz.d
	;;
	CALL(trap, 9, cr.ifa)
IVT_END(Instruction_Access_Bit)

IVT_ENTRY(Data_Access_Bit, 0x2800)
	mov r16=cr.ifa
	mov r17=pr
	;;
	thash r18=r16
	;;
	ttag r19=r16
	add r20=24,r18			// collision chain
	;;
	ld8 r20=[r20]			// bucket head
	;;
	rsm psr.dt			// turn off data translations
	dep r20=0,r20,61,3		// convert vhpt ptr to physical
	;;
	srlz.d				// serialize
	ld8 r20=[r20]			// first entry
	;;
1:	cmp.eq p15,p0=r0,r20		// done?
	(p15) br.cond.spnt.few 9f	// bail if done
	;;
	add r21=16,r20			// tag location
	;;
	ld8 r21=[r21]			// read tag
	;;
	cmp.ne p15,p0=r21,r19		// compare tags
	(p15) br.cond.sptk.few 2f	// if not, read next in chain
	;;
	ld8 r21=[r20]			// read pte
	mov r22=PTE_ACCESSED
	;;
	or r21=r22,r21			// set accessed bit
	;;
	st8 [r20]=r21,8			// store back
	;;
	ld8 r22=[r20]			// read rest of pte
	;;
	dep r18=0,r18,61,3		// convert vhpt ptr to physical
	;;
	add r20=16,r18			// address of tag
	;;
	ld8.acq r23=[r20]		// read old tag
	;;
	dep r23=-1,r23,63,1		// set ti bit
	;;
	st8.rel [r20]=r23		// store old tag + ti
	;;
	mf				// make sure everyone sees
	;;
	st8 [r18]=r21,8			// store pte
	;;
	st8 [r18]=r22,8
	;;
	st8.rel [r18]=r19		// store new tag
	;;
	itc.d r21			// and place in TLB
	ssm psr.dt
	;;
	srlz.d
	mov pr=r17,0x1ffff		// restore predicates
	rfi				// walker will retry the access
	;;
2:	add r20=24,r20			// next in chain
	;;
	ld8 r20=[r20]			// read chain
	br.sptk 1b			// loop
	;;
9:	ssm psr.dt
	mov pr=r17,0x1ffff		// restore predicates
	;;
	srlz.d
	;;
	CALL(trap, 10, cr.ifa)
IVT_END(Data_Access_Bit)

IVT_ENTRY(Break_Instruction, 0x2c00)
{ .mib
	mov r17=cr.iim
	mov r16=ip
	br.sptk exception_save
	;;
}
{ .mmi
	alloc r15=ar.pfs,0,0,2,0
	;;
	(p11) ssm psr.i
	mov out0=11
	;;
}
{ .mmi
	flushrs
	;;
	(p11) srlz.d
	add out1=16,sp
}
{ .mib
	nop 0
	nop 0
	br.call.sptk rp=trap
	;;
}
{ .mib
	nop 0
	nop 0
	br.sptk exception_restore
	;;
}
IVT_END(Break_Instruction)

IVT_ENTRY(External_Interrupt, 0x3000)
{ .mib
	mov r17=0
	mov r16=ip
	br.sptk exception_save
	;;
}
{ .mmi
	alloc r15=ar.pfs,0,0,1,0
	nop 0
	nop 0
	;;
}
{ .mib
	add out0=16,sp
	nop 0
	br.call.sptk rp=ia64_handle_intr
	;;
}
{ .mib
	nop 0
	nop 0
	br.sptk exception_restore
	;;
}
IVT_END(External_Interrupt)

IVT_ENTRY(Reserved_3400, 0x3400)
	CALL(trap, 13, cr.ifa)
IVT_END(Reserved_3400)

IVT_ENTRY(Reserved_3800, 0x3800)
	CALL(trap, 14, cr.ifa)
IVT_END(Reserved_3800)

IVT_ENTRY(Reserved_3c00, 0x3c00)
	CALL(trap, 15, cr.ifa)
IVT_END(Reserved_3c00)

IVT_ENTRY(Reserved_4000, 0x4000)
	CALL(trap, 16, cr.ifa)
IVT_END(Reserved_4000)

IVT_ENTRY(Reserved_4400, 0x4400)
	CALL(trap, 17, cr.ifa)
IVT_END(Reserved_4400)

IVT_ENTRY(Reserved_4800, 0x4800)
	CALL(trap, 18, cr.ifa)
IVT_END(Reserved_4800)

IVT_ENTRY(Reserved_4c00, 0x4c00)
	CALL(trap, 19, cr.ifa)
IVT_END(Reserved_4c00)

IVT_ENTRY(Page_Not_Present, 0x5000)
	CALL(trap, 20, cr.ifa)
IVT_END(Page_Not_Present)

IVT_ENTRY(Key_Permission, 0x5100)
	CALL(trap, 21, cr.ifa)
IVT_END(Key_Permission)

IVT_ENTRY(Instruction_Access_Rights, 0x5200)
	CALL(trap, 22, cr.ifa)
IVT_END(Instruction_Access_Rights)

IVT_ENTRY(Data_Access_Rights, 0x5300)
	CALL(trap, 23, cr.ifa)
IVT_END(Data_Access_Rights)

IVT_ENTRY(General_Exception, 0x5400)
	CALL(trap, 24, cr.ifa)
IVT_END(General_Exception)

IVT_ENTRY(Disabled_FP_Register, 0x5500)
	CALL(trap, 25, cr.ifa)
IVT_END(Disabled_FP_Register)

IVT_ENTRY(NaT_Consumption, 0x5600)
	CALL(trap, 26, cr.ifa)
IVT_END(NaT_Consumption)

IVT_ENTRY(Speculation, 0x5700)
	CALL(trap, 27, cr.iim)
IVT_END(Speculation)

IVT_ENTRY(Reserved_5800, 0x5800)
	CALL(trap, 28, cr.ifa)
IVT_END(Reserved_5800)

IVT_ENTRY(Debug, 0x5900)
	CALL(trap, 29, cr.ifa)
IVT_END(Debug)

IVT_ENTRY(Unaligned_Reference, 0x5a00)
	CALL(trap, 30, cr.ifa)
IVT_END(Unaligned_Reference)

IVT_ENTRY(Unsupported_Data_Reference, 0x5b00)
	CALL(trap, 31, cr.ifa)
IVT_END(Unsupported_Data_Reference)

IVT_ENTRY(Floating_Point_Fault, 0x5c00)
	CALL(trap, 32, cr.ifa)
IVT_END(Floating_Point_Fault)

IVT_ENTRY(Floating_Point_Trap, 0x5d00)
	CALL(trap, 33, cr.ifa)
IVT_END(Floating_Point_Trap)

IVT_ENTRY(Lower_Privilege_Transfer_Trap, 0x5e00)
	CALL(trap, 34, cr.ifa)
IVT_END(Lower_Privilege_Transfer_Trap)

IVT_ENTRY(Taken_Branch_Trap, 0x5f00)
	CALL(trap, 35, cr.ifa)
IVT_END(Taken_Branch_Trap)

IVT_ENTRY(Single_Step_Trap, 0x6000)
	CALL(trap, 36, cr.ifa)
IVT_END(Single_Step_Trap)

IVT_ENTRY(Reserved_6100, 0x6100)
	CALL(trap, 37, cr.ifa)
IVT_END(Reserved_6100)

IVT_ENTRY(Reserved_6200, 0x6200)
	CALL(trap, 38, cr.ifa)
IVT_END(Reserved_6200)

IVT_ENTRY(Reserved_6300, 0x6300)
	CALL(trap, 39, cr.ifa)
IVT_END(Reserved_6300)

IVT_ENTRY(Reserved_6400, 0x6400)
	CALL(trap, 40, cr.ifa)
IVT_END(Reserved_6400)

IVT_ENTRY(Reserved_6500, 0x6500)
	CALL(trap, 41, cr.ifa)
IVT_END(Reserved_6500)

IVT_ENTRY(Reserved_6600, 0x6600)
	CALL(trap, 42, cr.ifa)
IVT_END(Reserved_6600)

IVT_ENTRY(Reserved_6700, 0x6700)
	CALL(trap, 43, cr.ifa)
IVT_END(Reserved_6700)

IVT_ENTRY(Reserved_6800, 0x6800)
	CALL(trap, 44, cr.ifa)
IVT_END(Reserved_6800)

IVT_ENTRY(IA_32_Exception, 0x6900)
	CALL(IA32_TRAP, 45, cr.ifa)
IVT_END(IA_32_Exception)

IVT_ENTRY(IA_32_Intercept, 0x6a00)
	CALL(IA32_TRAP, 46, cr.iim)
IVT_END(IA_32_Intercept)

IVT_ENTRY(IA_32_Interrupt, 0x6b00)
	CALL(IA32_TRAP, 47, cr.ifa)
IVT_END(IA_32_Interrupt)

IVT_ENTRY(Reserved_6c00, 0x6c00)
	CALL(trap, 48, cr.ifa)
IVT_END(Reserved_6c00)

IVT_ENTRY(Reserved_6d00, 0x6d00)
	CALL(trap, 49, cr.ifa)
IVT_END(Reserved_6d00)

IVT_ENTRY(Reserved_6e00, 0x6e00)
	CALL(trap, 50, cr.ifa)
IVT_END(Reserved_6e00)

IVT_ENTRY(Reserved_6f00, 0x6f00)
	CALL(trap, 51, cr.ifa)
IVT_END(Reserved_6f00)

IVT_ENTRY(Reserved_7000, 0x7000)
	CALL(trap, 52, cr.ifa)
IVT_END(Reserved_7000)

IVT_ENTRY(Reserved_7100, 0x7100)
	CALL(trap, 53, cr.ifa)
IVT_END(Reserved_7100)

IVT_ENTRY(Reserved_7200, 0x7200)
	CALL(trap, 54, cr.ifa)
IVT_END(Reserved_7200)

IVT_ENTRY(Reserved_7300, 0x7300)
	CALL(trap, 55, cr.ifa)
IVT_END(Reserved_7300)

IVT_ENTRY(Reserved_7400, 0x7400)
	CALL(trap, 56, cr.ifa)
IVT_END(Reserved_7400)

IVT_ENTRY(Reserved_7500, 0x7500)
	CALL(trap, 57, cr.ifa)
IVT_END(Reserved_7500)

IVT_ENTRY(Reserved_7600, 0x7600)
	CALL(trap, 58, cr.ifa)
IVT_END(Reserved_7600)

IVT_ENTRY(Reserved_7700, 0x7700)
	CALL(trap, 59, cr.ifa)
IVT_END(Reserved_7700)

IVT_ENTRY(Reserved_7800, 0x7800)
	CALL(trap, 60, cr.ifa)
IVT_END(Reserved_7800)

IVT_ENTRY(Reserved_7900, 0x7900)
	CALL(trap, 61, cr.ifa)
IVT_END(Reserved_7900)

IVT_ENTRY(Reserved_7a00, 0x7a00)
	CALL(trap, 62, cr.ifa)
IVT_END(Reserved_7a00)

IVT_ENTRY(Reserved_7b00, 0x7b00)
	CALL(trap, 63, cr.ifa)
IVT_END(Reserved_7b00)

IVT_ENTRY(Reserved_7c00, 0x7c00)
	CALL(trap, 64, cr.ifa)
IVT_END(Reserved_7c00)

IVT_ENTRY(Reserved_7d00, 0x7d00)
	CALL(trap, 65, cr.ifa)
IVT_END(Reserved_7d00)

IVT_ENTRY(Reserved_7e00, 0x7e00)
	CALL(trap, 66, cr.ifa)
IVT_END(Reserved_7e00)

IVT_ENTRY(Reserved_7f00, 0x7f00)
	CALL(trap, 67, cr.ifa)
IVT_END(Reserved_7f00)