Home | History | Annotate | Line # | Download | only in powerpc
      1 /*	$NetBSD: trap.c,v 1.165 2023/12/15 09:31:02 rin Exp $	*/
      2 
      3 /*
      4  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
      5  * Copyright (C) 1995, 1996 TooLs GmbH.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by TooLs GmbH.
     19  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     20  *    derived from this software without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     25  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     28  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     29  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     30  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     31  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 #define	__UFETCHSTORE_PRIVATE
     35 #define	__UCAS_PRIVATE
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.165 2023/12/15 09:31:02 rin Exp $");
     39 
     40 #ifdef _KERNEL_OPT
     41 #include "opt_altivec.h"
     42 #include "opt_ddb.h"
     43 #include "opt_ppcarch.h"
     44 #endif
     45 
     46 #include <sys/param.h>
     47 
     48 #include <sys/proc.h>
     49 #include <sys/ras.h>
     50 #include <sys/reboot.h>
     51 #include <sys/systm.h>
     52 #include <sys/kauth.h>
     53 #include <sys/cpu.h>
     54 
     55 #include <uvm/uvm_extern.h>
     56 
     57 #include <dev/cons.h>
     58 
     59 #include <powerpc/altivec.h>
     60 #include <powerpc/db_machdep.h>
     61 #include <powerpc/fpu.h>
     62 #include <powerpc/frame.h>
     63 #include <powerpc/instr.h>
     64 #include <powerpc/pcb.h>
     65 #include <powerpc/pmap.h>
     66 #include <powerpc/trap.h>
     67 #include <powerpc/userret.h>
     68 
     69 #include <powerpc/spr.h>
     70 #include <powerpc/oea/spr.h>
     71 
     72 static int emulate_privileged(struct lwp *, struct trapframe *);
     73 static int fix_unaligned(struct lwp *, struct trapframe *);
     74 static inline vaddr_t setusr(vaddr_t, size_t *);
     75 static inline void unsetusr(void);
     76 
     77 void trap(struct trapframe *);	/* Called from locore / trap_subr */
     78 /* Why are these not defined in a header? */
     79 int badaddr(void *, size_t);
     80 int badaddr_read(void *, size_t, int *);
     81 
/*
 * Description of one alignment-fault instruction group, as decoded
 * from the DSISR by get_dsi_info().  fix_unaligned() switches on the
 * indicator and post-processes the access according to the flags.
 */
struct dsi_info {
    uint16_t indicator;	/* EXC_ALI_* group this entry stands for */
    uint16_t flags;	/* DSI_OP_* (UPDATE/INDEXED/ZERO/ALGEBRAIC/REVERSED) */
};

/* Look up the dsi_info entry for a DSISR value; NULL if unhandled. */
static const struct dsi_info* get_dsi_info(register_t);
     88 
/*
 * trap:
 *
 *	Common trap handler, entered from locore / trap_subr with the
 *	trapframe describing the exception.  Dispatches on the exception
 *	type, with EXC_USER or'ed in when the trap was taken from user
 *	mode (PSL_PR set in the saved SRR1).  Page faults are resolved
 *	by spilling evicted PTEs or calling uvm_fault(); recoverable
 *	kernel faults resume at the pcb_onfault handler; user faults
 *	that cannot be resolved are turned into signals.  Unrecoverable
 *	kernel traps end in panic().
 */
void
trap(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	struct pcb * const pcb = curpcb;
	struct vm_map *map;
	ksiginfo_t ksi;
	/* PSL_PR in the saved MSR means the trap came from user mode. */
	const bool usertrap = (tf->tf_srr1 & PSL_PR);
	int type = tf->tf_exc;
	int ftype, rv;

	ci->ci_ev_traps.ev_count++;

	KASSERTMSG(!usertrap || tf == l->l_md.md_utf,
	    "trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, l->l_md.md_utf);

	if (usertrap) {
		type |= EXC_USER;
#ifdef DIAGNOSTIC
		if (l == NULL || p == NULL)
			panic("trap: user trap %d with lwp = %p, proc = %p",
			    type, l, p);
#endif
	}

	ci->ci_data.cpu_ntrap++;

	switch (type) {
	case EXC_RUNMODETRC|EXC_USER:
		/* FALLTHROUGH */
	case EXC_TRC|EXC_USER:
		/*
		 * User trace/single-step: clear single-step and post
		 * SIGTRAP, unless the PC lies inside a registered
		 * restartable atomic sequence (RAS).
		 */
		tf->tf_srr1 &= ~PSL_SE;
		if (p->p_raslist == NULL ||
		    ras_lookup(p, (void *)tf->tf_srr0) == (void *) -1) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_trap = EXC_TRC;
			ksi.ksi_addr = (void *)tf->tf_srr0;
			ksi.ksi_code = TRAP_TRACE;
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
		break;
	case EXC_DSI: {
		/* Kernel-mode data storage interrupt (data page fault). */
		struct faultbuf * const fb = pcb->pcb_onfault;
		vaddr_t va = tf->tf_dar;

		ci->ci_ev_kdsi.ev_count++;

		/*
		 * A fault in the segment temporarily mapped by setusr()
		 * is really a fault on the corresponding user address:
		 * translate va back and fault against the user's map.
		 */
		if ((va >> ADDR_SR_SHFT) == pcb->pcb_kmapsr) {
			va &= ADDR_PIDX | ADDR_POFF;
			va |= pcb->pcb_umapsr << ADDR_SR_SHFT;
			map = &p->p_vmspace->vm_map;
		}
#if defined(DIAGNOSTIC) && !defined(PPC_OEA64)
		else if (__predict_false((va >> ADDR_SR_SHFT) == USER_SR)) {
			printf("trap: kernel %s DSI trap @ %#lx by %#lx"
			    " (DSISR %#x): USER_SR unset\n",
			    (tf->tf_dsisr & DSISR_STORE)
				? "write" : "read",
			    va, tf->tf_srr0, tf->tf_dsisr);
			goto brain_damage2;
		}
#endif
		else {
			map = kernel_map;
		}

		/*
		 * If the translation was merely evicted from the page
		 * table, spill it back in and retry without a full
		 * uvm_fault().
		 */
#ifdef PPC_OEA64
		if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
		    vm_map_pmap(map)->pm_ste_evictions > 0 &&
		    pmap_ste_spill(vm_map_pmap(map), trunc_page(va), false))
			return;
#endif
		if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
		    vm_map_pmap(map)->pm_evictions > 0 &&
		    pmap_pte_spill(vm_map_pmap(map), trunc_page(va), false))
			return;

		/*
		 * Only query UVM if no interrupts are active.
		 */
		if (ci->ci_idepth < 0) {
			if (tf->tf_dsisr & DSISR_STORE)
				ftype = VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;

			/* Clear onfault so a recursive fault is fatal. */
			pcb->pcb_onfault = NULL;
			rv = uvm_fault(map, trunc_page(va), ftype);
			pcb->pcb_onfault = fb;

			if (map != kernel_map) {
				/*
				 * Record any stack growth...
				 */
				if (rv == 0)
					uvm_grow(p, trunc_page(va));
			}
			if (rv == 0)
				return;
			if (rv == EACCES)
				rv = EFAULT;
		} else {
			/*
			 * Note that this implies that access to the USER
			 * segment is not allowed in interrupt context.
			 */
			rv = EFAULT;
		}
		/*
		 * Unresolved kernel fault: if an onfault handler is
		 * registered, resume there with the error code in r3.
		 */
		if (fb != NULL) {
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_cr = fb->fb_cr;
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = rv;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
		printf("trap: kernel %s DSI trap @ %#lx by %#lx (DSISR %#x, err"
		    "=%d), lr %#lx\n", (tf->tf_dsisr & DSISR_STORE) ? "write" : "read",
		    va, tf->tf_srr0, tf->tf_dsisr, rv, tf->tf_lr);
		goto brain_damage2;
	}
	case EXC_DSI|EXC_USER:
		/* User-mode data storage interrupt (data page fault). */
		ci->ci_ev_udsi.ev_count++;
		if (tf->tf_dsisr & DSISR_STORE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		/*
		 * Try to spill an evicted pte into the page table
		 * if this wasn't a protection fault and the pmap
		 * has some evicted pte's.
		 */
		map = &p->p_vmspace->vm_map;
#ifdef PPC_OEA64
		if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
		    vm_map_pmap(map)->pm_ste_evictions > 0 &&
		    pmap_ste_spill(vm_map_pmap(map), trunc_page(tf->tf_dar),
				   false)) {
			break;
		}
#endif

		if ((tf->tf_dsisr & DSISR_NOTFOUND) &&
		    vm_map_pmap(map)->pm_evictions > 0 &&
		    pmap_pte_spill(vm_map_pmap(map), trunc_page(tf->tf_dar),
				   false)) {
			break;
		}

		KASSERT(pcb->pcb_onfault == NULL);
		rv = uvm_fault(map, trunc_page(tf->tf_dar), ftype);
		if (rv == 0) {
			/*
			 * Record any stack growth...
			 */
			uvm_grow(p, trunc_page(tf->tf_dar));
			break;
		}
		ci->ci_ev_udsi_fatal.ev_count++;
		if (cpu_printfataltraps
		    && (p->p_slflag & PSL_TRACED) == 0
		    && !sigismember(&p->p_sigctx.ps_sigcatch, SIGSEGV)) {
			printf("trap: pid %d.%d (%s): user %s DSI trap @ %#lx "
			    "by %#lx (DSISR %#x, err=%d)\n",
			    p->p_pid, l->l_lid, p->p_comm,
			    (tf->tf_dsisr & DSISR_STORE) ? "write" : "read",
			    tf->tf_dar, tf->tf_srr0, tf->tf_dsisr, rv);
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)tf->tf_dar;
vm_signal:
		/*
		 * Shared by the DSI and ISI user cases: map the
		 * uvm_fault() error to a signal/si_code pair.
		 */
		switch (rv) {
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			       "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
			       l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		(*p->p_emul->e_trapsignal)(l, &ksi);
		break;

	case EXC_ISI:
		/* Kernel instruction fetch fault: always fatal. */
		ci->ci_ev_kisi.ev_count++;

		printf("trap: kernel ISI by %#lx (SRR1 %#lx), lr: %#lx\n",
		    tf->tf_srr0, tf->tf_srr1, tf->tf_lr);
		goto brain_damage2;

	case EXC_ISI|EXC_USER:
		/* User instruction fetch fault. */
		ci->ci_ev_isi.ev_count++;

		/*
		 * Try to spill an evicted pte into the page table
		 * if this wasn't a protection fault and the pmap
		 * has some evicted pte's.
		 */
		map = &p->p_vmspace->vm_map;
#ifdef PPC_OEA64
		if (vm_map_pmap(map)->pm_ste_evictions > 0 &&
		    pmap_ste_spill(vm_map_pmap(map), trunc_page(tf->tf_srr0),
				   true)) {
			break;
		}
#endif

		if (vm_map_pmap(map)->pm_evictions > 0 &&
		    pmap_pte_spill(vm_map_pmap(map), trunc_page(tf->tf_srr0),
				   true)) {
			break;
		}

		ftype = VM_PROT_EXECUTE;
		KASSERT(pcb->pcb_onfault == NULL);
		rv = uvm_fault(map, trunc_page(tf->tf_srr0), ftype);
		if (rv == 0) {
			break;
		}
		ci->ci_ev_isi_fatal.ev_count++;
		if (cpu_printfataltraps
		    && (p->p_slflag & PSL_TRACED) == 0
		    && !sigismember(&p->p_sigctx.ps_sigcatch, SIGSEGV)) {
			printf("trap: pid %d.%d (%s): user ISI trap @ %#lx "
			    "(SRR1=%#lx)\n", p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_srr0, tf->tf_srr1);
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		goto vm_signal;

	case EXC_FPU|EXC_USER:
		/* FPU unavailable: lazily load the FP state. */
		ci->ci_ev_fpu.ev_count++;
		fpu_load();
		break;

	case EXC_AST|EXC_USER:
		/* Pending asynchronous software trap. */
		cpu_ast(l, ci);
		break;

	case EXC_ALI|EXC_USER:
		/* Alignment fault: emulate if possible, else SIGBUS. */
		ci->ci_ev_ali.ev_count++;
		if (fix_unaligned(l, tf) != 0) {
			ci->ci_ev_ali_fatal.ev_count++;
			if (cpu_printfataltraps
			    && (p->p_slflag & PSL_TRACED) == 0
			    && !sigismember(&p->p_sigctx.ps_sigcatch, SIGBUS)) {
				printf("trap: pid %d.%d (%s): user ALI trap @ "
				    "%#lx by %#lx (DSISR %#x)\n",
				    p->p_pid, l->l_lid, p->p_comm,
				    tf->tf_dar, tf->tf_srr0, tf->tf_dsisr);
			}
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)tf->tf_dar;
			ksi.ksi_code = BUS_ADRALN;
			(*p->p_emul->e_trapsignal)(l, &ksi);
		} else
			/* Emulated: step past the faulting instruction. */
			tf->tf_srr0 += 4;
		break;

	case EXC_PERF|EXC_USER:
		/* Not really, but needed due to how trap_subr.S works */
	case EXC_VEC|EXC_USER:
		ci->ci_ev_vec.ev_count++;
#ifdef ALTIVEC
		/* AltiVec unavailable: lazily load the vector state. */
		vec_load();
		break;
#else
		/* No AltiVec support configured: treat as illegal insn. */
		if (cpu_printfataltraps
		    && (p->p_slflag & PSL_TRACED) == 0
		    && !sigismember(&p->p_sigctx.ps_sigcatch, SIGILL)) {
			printf("trap: pid %d.%d (%s): user VEC trap @ %#lx "
			    "(SRR1=%#lx)\n",
			    p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_srr0, tf->tf_srr1);
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_trap = EXC_PGM;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		ksi.ksi_code = ILL_ILLOPC;
		(*p->p_emul->e_trapsignal)(l, &ksi);
		break;
#endif
	case EXC_MCHK|EXC_USER:
		/* User machine check: deliver SIGBUS. */
		ci->ci_ev_umchk.ev_count++;
		if (cpu_printfataltraps
		    && (p->p_slflag & PSL_TRACED) == 0
		    && !sigismember(&p->p_sigctx.ps_sigcatch, SIGBUS)) {
			printf("trap: pid %d (%s): user MCHK trap @ %#lx "
			    "(SRR1=%#lx)\n",
			    p->p_pid, p->p_comm, tf->tf_srr0, tf->tf_srr1);
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_trap = EXC_MCHK;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		ksi.ksi_code = BUS_OBJERR;
		(*p->p_emul->e_trapsignal)(l, &ksi);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Program exception: distinguish trap instruction,
		 * enabled FP exception, privileged instruction, and
		 * illegal instruction from the SRR1 status bits.
		 */
		ci->ci_ev_pgm.ev_count++;
		if (tf->tf_srr1 & 0x00020000) {	/* Bit 14 is set if trap */
			if (p->p_raslist == NULL ||
			    ras_lookup(p, (void *)tf->tf_srr0) == (void *) -1) {
				KSI_INIT_TRAP(&ksi);
				ksi.ksi_signo = SIGTRAP;
				ksi.ksi_trap = EXC_PGM;
				ksi.ksi_addr = (void *)tf->tf_srr0;
				ksi.ksi_code = TRAP_BRKPT;
				(*p->p_emul->e_trapsignal)(l, &ksi);
			} else {
				/* skip the trap instruction */
				tf->tf_srr0 += 4;
			}
		} else {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGILL;
			ksi.ksi_trap = EXC_PGM;
			ksi.ksi_addr = (void *)tf->tf_srr0;
			if (tf->tf_srr1 & 0x100000) {
				/* Enabled floating-point exception. */
				ksi.ksi_signo = SIGFPE;
				ksi.ksi_code = fpu_get_fault_code();
			} else if (tf->tf_srr1 & 0x40000) {
				/* Privileged instruction: try emulation. */
				if (emulate_privileged(l, tf)) {
					tf->tf_srr0 += 4;
					break;
				}
				ksi.ksi_code = ILL_PRVOPC;
			} else
				ksi.ksi_code = ILL_ILLOPC;
			if (cpu_printfataltraps
			    && (p->p_slflag & PSL_TRACED) == 0
			    && !sigismember(&p->p_sigctx.ps_sigcatch,
				    ksi.ksi_signo)) {
				printf("trap: pid %d.%d (%s): user PGM trap @"
				    " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid,
				    p->p_comm, tf->tf_srr0, tf->tf_srr1);
			}
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
		break;

	case EXC_MCHK: {
		/*
		 * Kernel machine check: recoverable only if an onfault
		 * handler is registered (e.g. from badaddr_read()).
		 */
		struct faultbuf *fb;

		if ((fb = pcb->pcb_onfault) != NULL) {
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = EFAULT;
			tf->tf_cr = fb->fb_cr;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
		printf("trap: pid %d.%d (%s): kernel MCHK trap @"
		    " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid,
		    p->p_comm, tf->tf_srr0, tf->tf_srr1);
		goto brain_damage2;
	}
	case EXC_ALI:
		/* Kernel alignment fault: not emulated, fatal. */
		printf("trap: pid %d.%d (%s): kernel ALI trap @ %#lx by %#lx "
		    "(DSISR %#x)\n", p->p_pid, l->l_lid, p->p_comm,
		    tf->tf_dar, tf->tf_srr0, tf->tf_dsisr);
		goto brain_damage2;
	case EXC_PGM:
		/* Kernel program exception: fatal. */
		printf("trap: pid %d.%d (%s): kernel PGM trap @"
		    " %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid,
		    p->p_comm, tf->tf_srr0, tf->tf_srr1);
		goto brain_damage2;

	default:
		printf("trap type %x at %lx\n", type, tf->tf_srr0);
brain_damage2:
#if defined(DDB) && 0 /* XXX */
		if (kdb_trap(type, tf))
			return;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cnpollc(1);
		cngetc();
		cnpollc(0);
#endif
		panic("trap");
	}
	/* Handled user traps fall through to return-to-user processing. */
	userret(l, tf);
}
    500 
#ifdef _LP64
/*
 * setusr:
 *
 *	Make the user segment containing uva accessible from kernel
 *	mode and return the kernel virtual address aliasing uva.
 *	*len_p is set to the number of bytes from uva to the end of
 *	its segment, i.e. how much may be touched through the returned
 *	address before setusr() must be called again.  Undone by
 *	unsetusr().
 */
static inline vaddr_t
setusr(vaddr_t uva, size_t *len_p)
{
	*len_p = SEGMENT_LENGTH - (uva & ~SEGMENT_MASK);
	return pmap_setusr(uva) + (uva & ~SEGMENT_MASK);
}
/* Tear down the user-segment access set up by setusr(). */
static void
unsetusr(void)
{
	pmap_unsetusr();
}
#else
/*
 * 32-bit OEA version: load the user's segment register value into
 * the dedicated USER_SR slot, and record the mapping in the pcb
 * (pcb_kmapsr/pcb_umapsr) so that trap() can redirect a DSI taken
 * on the aliased segment back to the real user address.
 */
static inline vaddr_t
setusr(vaddr_t uva, size_t *len_p)
{
	struct pcb *pcb = curpcb;
	vaddr_t p;
	KASSERT(pcb != NULL);
	KASSERT(pcb->pcb_kmapsr == 0);
	pcb->pcb_kmapsr = USER_SR;
	pcb->pcb_umapsr = uva >> ADDR_SR_SHFT;
	*len_p = SEGMENT_LENGTH - (uva & ~SEGMENT_MASK);
	p = (USER_SR << ADDR_SR_SHFT) + (uva & ~SEGMENT_MASK);
	__asm volatile ("isync; mtsr %0,%1; isync"
	    ::	"n"(USER_SR), "r"(pcb->pcb_pm->pm_sr[pcb->pcb_umapsr]));
	return p;
}

/* Restore USER_SR to the empty segment and clear the pcb record. */
static void
unsetusr(void)
{
	curpcb->pcb_kmapsr = 0;
	__asm volatile ("isync; mtsr %0,%1; isync"
	    ::	"n"(USER_SR), "r"(EMPTY_SEGMENT));
}
#endif
    538 
/*
 * UFETCH(sz) expands to _ufetch_<sz>(uaddr, valp):
 *
 *	Fetch a <sz>-bit value from user address uaddr into *valp.
 *	setfault() registers a fault handler (it returns nonzero when
 *	a fault unwinds back to it); setusr() maps the user segment so
 *	the access can be made directly.  Returns 0 on success or the
 *	error delivered by the fault handler.  The onfault handler is
 *	always cleared on the way out.
 */
#define	UFETCH(sz)							\
int									\
_ufetch_ ## sz(const uint ## sz ## _t *uaddr, uint ## sz ## _t *valp)	\
{									\
	struct faultbuf env;						\
	vaddr_t p;							\
	size_t seglen;							\
	int rv;								\
									\
	if ((rv = setfault(&env)) != 0) {				\
		goto out;						\
	}								\
	p = setusr((vaddr_t)uaddr, &seglen);				\
	*valp = *(const volatile uint ## sz ## _t *)p;			\
 out:									\
	unsetusr();							\
	curpcb->pcb_onfault = 0;					\
	return rv;							\
}

UFETCH(8)
UFETCH(16)
UFETCH(32)
#ifdef _LP64
UFETCH(64)
#endif

#undef UFETCH
    567 
/*
 * USTORE(sz) expands to _ustore_<sz>(uaddr, val):
 *
 *	Store the <sz>-bit value val at user address uaddr.  Mirrors
 *	UFETCH: setfault() arms the fault handler, setusr() maps the
 *	user segment, and the onfault handler is cleared on exit.
 *	Returns 0 on success or the error from the fault handler.
 */
#define	USTORE(sz)							\
int									\
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val)		\
{									\
	struct faultbuf env;						\
	vaddr_t p;							\
	size_t seglen;							\
	int rv;								\
									\
	if ((rv = setfault(&env)) != 0) {				\
		goto out;						\
	}								\
	p = setusr((vaddr_t)uaddr, &seglen);				\
	*(volatile uint ## sz ## _t *)p = val;				\
 out:									\
	unsetusr();							\
	curpcb->pcb_onfault = 0;					\
	return rv;							\
}

USTORE(8)
USTORE(16)
USTORE(32)
#ifdef _LP64
USTORE(64)
#endif

#undef USTORE
    596 
    597 int
    598 copyin(const void *udaddr, void *kaddr, size_t len)
    599 {
    600 	vaddr_t uva = (vaddr_t) udaddr;
    601 	char *kp = kaddr;
    602 	struct faultbuf env;
    603 	int rv;
    604 
    605 	if ((rv = setfault(&env)) != 0) {
    606 		unsetusr();
    607 		goto out;
    608 	}
    609 
    610 	while (len > 0) {
    611 		size_t seglen;
    612 		vaddr_t p = setusr(uva, &seglen);
    613 		if (seglen > len)
    614 			seglen = len;
    615 		memcpy(kp, (const char *) p, seglen);
    616 		uva += seglen;
    617 		kp += seglen;
    618 		len -= seglen;
    619 		unsetusr();
    620 	}
    621 
    622   out:
    623 	curpcb->pcb_onfault = 0;
    624 	return rv;
    625 }
    626 
    627 int
    628 copyout(const void *kaddr, void *udaddr, size_t len)
    629 {
    630 	const char *kp = kaddr;
    631 	vaddr_t uva = (vaddr_t) udaddr;
    632 	struct faultbuf env;
    633 	int rv;
    634 
    635 	if ((rv = setfault(&env)) != 0) {
    636 		unsetusr();
    637 		goto out;
    638 	}
    639 
    640 	while (len > 0) {
    641 		size_t seglen;
    642 		vaddr_t p = setusr(uva, &seglen);
    643 		if (seglen > len)
    644 			seglen = len;
    645 		memcpy((char *)p, kp, seglen);
    646 		uva += seglen;
    647 		kp += seglen;
    648 		len -= seglen;
    649 		unsetusr();
    650 	}
    651 
    652   out:
    653 	curpcb->pcb_onfault = 0;
    654 	return rv;
    655 }
    656 
    657 /*
    658  * kcopy(const void *src, void *dst, size_t len);
    659  *
    660  * Copy len bytes from src to dst, aborting if we encounter a fatal
    661  * page fault.
    662  *
    663  * kcopy() _must_ save and restore the old fault handler since it is
    664  * called by uiomove(), which may be in the path of servicing a non-fatal
    665  * page fault.
    666  */
    667 int
    668 kcopy(const void *src, void *dst, size_t len)
    669 {
    670 	struct faultbuf env, *oldfault;
    671 	int rv;
    672 
    673 	oldfault = curpcb->pcb_onfault;
    674 
    675 	if ((rv = setfault(&env)) == 0)
    676 		memcpy(dst, src, len);
    677 
    678 	curpcb->pcb_onfault = oldfault;
    679 	return rv;
    680 }
    681 
#if 0 /* XXX CPU configuration spaghetti */
/*
 * _ucas_32:
 *
 *	Compare-and-swap a 32-bit word at user address uptr: if it
 *	equals old, replace it with new; the value observed is stored
 *	in *ret by do_ucas_32.  Uses the same setfault()/setusr()
 *	protocol as the other user-access routines.  Currently
 *	compiled out (see the XXX above).
 */
int
_ucas_32(volatile uint32_t *uptr, uint32_t old, uint32_t new, uint32_t *ret)
{
	extern int do_ucas_32(volatile int32_t *, int32_t, int32_t, int32_t *);
	vaddr_t uva = (vaddr_t)uptr;
	vaddr_t p;
	struct faultbuf env;
	size_t seglen;
	int rv;

	if ((rv = setfault(&env)) != 0) {
		unsetusr();
		goto out;
	}
	p = setusr(uva, &seglen);
	/* The word must not straddle a segment boundary. */
	KASSERT(seglen >= sizeof(*uptr));
	do_ucas_32((void *)p, old, new, ret);
	unsetusr();

out:
	curpcb->pcb_onfault = 0;
	return rv;
}
#endif
    707 
/*
 * badaddr:
 *
 *	Probe a (bus) address of the given size, discarding the value
 *	read.  Returns nonzero if the access faulted.
 */
int
badaddr(void *addr, size_t size)
{
	return badaddr_read(addr, size, NULL);
}
    713 
/*
 * badaddr_read:
 *
 *	Probe addr by reading size (1, 2, or 4) bytes from it with a
 *	fault handler armed, so that a machine check taken on the
 *	access is recovered rather than fatal.  Returns nonzero if the
 *	access faulted; on success, returns 0 and stores the value
 *	read in *rptr when rptr is non-NULL.  The sync/isync pairs
 *	order the probe against pending and caused machine checks.
 */
int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm volatile ("sync; isync");

	if (setfault(&env)) {
		/* Fault handler unwound here: the address is bad. */
		curpcb->pcb_onfault = 0;
		__asm volatile ("sync");
		return 1;
	}

	__asm volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%lu)", (u_long) size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm volatile ("sync; isync");

	curpcb->pcb_onfault = 0;
	__asm volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
    757 
    758 /*
    759  * For now, this only deals with the particular unaligned access case
    760  * that gcc tends to generate.  Eventually it should handle all of the
    761  * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
    762  */
    763 
    764 static int
    765 fix_unaligned(struct lwp *l, struct trapframe *tf)
    766 {
    767 	const struct dsi_info* dsi = get_dsi_info(tf->tf_dsisr);
    768 
    769 	if ( !dsi )
    770 	    return -1;
    771 
    772 	switch (dsi->indicator) {
    773 	case EXC_ALI_DCBZ:
    774 		{
    775 			/*
    776 			 * The DCBZ (Data Cache Block Zero) instruction
    777 			 * gives an alignment fault if used on non-cacheable
    778 			 * memory.  We handle the fault mainly for the
    779 			 * case when we are running with the cache disabled
    780 			 * for debugging.
    781 			 */
    782 			static char zeroes[MAXCACHELINESIZE];
    783 			int error;
    784 			error = copyout(zeroes,
    785 			    (void *)(tf->tf_dar & -curcpu()->ci_ci.dcache_line_size),
    786 			    curcpu()->ci_ci.dcache_line_size);
    787 			if (error)
    788 				return -1;
    789 			return 0;
    790 		}
    791 		break;
    792 
    793 	case EXC_ALI_LFD:
    794 	case EXC_ALI_LFDU:
    795 	case EXC_ALI_LDFX:
    796 	case EXC_ALI_LFDUX:
    797 		{
    798 			struct pcb * const pcb = lwp_getpcb(l);
    799 			const int reg = EXC_ALI_RST(tf->tf_dsisr);
    800 			const int a_reg = EXC_ALI_RA(tf->tf_dsisr);
    801 			uint64_t * const fpreg = &pcb->pcb_fpu.fpreg[reg];
    802 			register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
    803 
    804 			/*
    805 			 * Juggle the FPU to ensure that we've initialized
    806 			 * the FPRs, and that their current state is in
    807 			 * the PCB.
    808 			 */
    809 
    810 			KASSERT(l == curlwp);
    811 			if (!fpu_used_p(l)) {
    812 				memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
    813 				fpu_mark_used(l);
    814 			} else {
    815 				fpu_save(l);
    816 			}
    817 
    818 			if (copyin((void *)tf->tf_dar, fpreg,
    819 				   sizeof(double)) != 0)
    820 				return -1;
    821 
    822 			if (dsi->flags & DSI_OP_INDEXED) {
    823 			    /* do nothing */
    824 			}
    825 
    826 			if (dsi->flags & DSI_OP_UPDATE) {
    827 			    /* this is valid for 601, but to simplify logic don't pass for any */
    828 			    if (a_reg == 0)
    829 				return -1;
    830 			    else
    831 				*a_reg_addr = tf->tf_dar;
    832 			}
    833 
    834 			fpu_load();
    835 			return 0;
    836 		}
    837 		break;
    838 
    839 	case EXC_ALI_STFD:
    840 	case EXC_ALI_STFDU:
    841 	case EXC_ALI_STFDX:
    842 	case EXC_ALI_STFDUX:
    843 		{
    844 			struct pcb * const pcb = lwp_getpcb(l);
    845 			const int reg = EXC_ALI_RST(tf->tf_dsisr);
    846 			const int a_reg = EXC_ALI_RA(tf->tf_dsisr);
    847 			uint64_t * const fpreg = &pcb->pcb_fpu.fpreg[reg];
    848 			register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
    849 
    850 			/*
    851 			 * Juggle the FPU to ensure that we've initialized
    852 			 * the FPRs, and that their current state is in
    853 			 * the PCB.
    854 			 */
    855 
    856 			KASSERT(l == curlwp);
    857 			if (!fpu_used_p(l)) {
    858 				memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
    859 				fpu_mark_used(l);
    860 			} else {
    861 				fpu_save(l);
    862 			}
    863 
    864 			if (copyout(fpreg, (void *)tf->tf_dar,
    865 				    sizeof(double)) != 0)
    866 				return -1;
    867 
    868 			if (dsi->flags & DSI_OP_INDEXED) {
    869 			    /* do nothing */
    870 			}
    871 
    872 			if (dsi->flags & DSI_OP_UPDATE) {
    873 			    /* this is valid for 601, but to simplify logic don't pass for any */
    874 			    if (a_reg == 0)
    875 				return -1;
    876 			    else
    877 				*a_reg_addr = tf->tf_dar;
    878 			}
    879 
    880 			fpu_load();
    881 			return 0;
    882 		}
    883 		break;
    884 
    885 	case EXC_ALI_LHZ:
    886 	case EXC_ALI_LHZU:
    887 	case EXC_ALI_LHZX:
    888 	case EXC_ALI_LHZUX:
    889 	case EXC_ALI_LHA:
    890 	case EXC_ALI_LHAU:
    891 	case EXC_ALI_LHAX:
    892 	case EXC_ALI_LHAUX:
    893 	case EXC_ALI_LHBRX:
    894 		{
    895 		    const register_t ea_addr = tf->tf_dar;
    896 		    const unsigned int t_reg = EXC_ALI_RST(tf->tf_dsisr);
    897 		    const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
    898 		    register_t* t_reg_addr = &tf->tf_fixreg[t_reg];
    899 		    register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
    900 
    901 		    /* load into lower 2 bytes of reg */
    902 		    if (copyin((void *)ea_addr,
    903 			       t_reg_addr+2,
    904 			       sizeof(uint16_t)) != 0)
    905 			return -1;
    906 
    907 		    if (dsi->flags & DSI_OP_UPDATE) {
    908 			/* this is valid for 601, but to simplify logic don't pass for any */
    909 			if (a_reg == 0)
    910 			    return -1;
    911 			else
    912 			    *a_reg_addr = ea_addr;
    913 		    }
    914 
    915 		    if (dsi->flags & DSI_OP_INDEXED) {
    916 			/* do nothing , indexed address already in ea */
    917 		    }
    918 
    919 		    if (dsi->flags & DSI_OP_ZERO) {
    920 			/* clear upper 2 bytes */
    921 			*t_reg_addr &= 0x0000ffff;
    922 		    } else if (dsi->flags & DSI_OP_ALGEBRAIC) {
    923 			/* sign extend upper 2 bytes */
    924 			if (*t_reg_addr & 0x00008000)
    925 			    *t_reg_addr |= 0xffff0000;
    926 			else
    927 			    *t_reg_addr &= 0x0000ffff;
    928 		    }
    929 
    930 		    if (dsi->flags & DSI_OP_REVERSED) {
    931 			/* reverse lower 2 bytes */
    932 			uint32_t temp = *t_reg_addr;
    933 
    934 			*t_reg_addr = ((temp & 0x000000ff) << 8 ) |
    935 			              ((temp & 0x0000ff00) >> 8 );
    936 		    }
    937 		    return 0;
    938 		}
    939 		break;
    940 
    941 	case EXC_ALI_STH:
    942 	case EXC_ALI_STHU:
    943 	case EXC_ALI_STHX:
    944 	case EXC_ALI_STHUX:
    945 	case EXC_ALI_STHBRX:
    946 		{
    947 		    const register_t ea_addr = tf->tf_dar;
    948 		    const unsigned int s_reg = EXC_ALI_RST(tf->tf_dsisr);
    949 		    const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
    950 		    register_t* s_reg_addr = &tf->tf_fixreg[s_reg];
    951 		    register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
    952 
    953 		    /* byte-reversed write out of lower 2 bytes */
    954 		    if (dsi->flags & DSI_OP_REVERSED) {
    955 			uint16_t tmp = *s_reg_addr & 0xffff;
    956 			tmp = bswap16(tmp);
    957 
    958 			if (copyout(&tmp,
    959 				    (void *)ea_addr,
    960 				    sizeof(uint16_t)) != 0)
    961 			    return -1;
    962 		    }
    963 		    /* write out lower 2 bytes */
    964 		    else if (copyout(s_reg_addr+2,
    965 				     (void *)ea_addr,
    966 				     sizeof(uint16_t)) != 0) {
    967 			return -1;
    968 		    }
    969 
    970 		    if (dsi->flags & DSI_OP_INDEXED) {
    971 			/* do nothing, indexed address already in ea */
    972 		    }
    973 
    974 		    if (dsi->flags & DSI_OP_UPDATE) {
    975 			/* this is valid for 601, but to simplify logic don't pass for any */
    976 			if (a_reg == 0)
    977 			    return -1;
    978 			else
    979 			    *a_reg_addr = ea_addr;
    980 		    }
    981 
    982 		    return 0;
    983 		}
    984 		break;
    985 
    986 	case EXC_ALI_LWARX_LWZ:
    987 	case EXC_ALI_LWZU:
    988 	case EXC_ALI_LWZX:
    989 	case EXC_ALI_LWZUX:
    990 	case EXC_ALI_LWBRX:
    991 		{
    992 		    const register_t ea_addr = tf->tf_dar;
    993 		    const unsigned int t_reg = EXC_ALI_RST(tf->tf_dsisr);
    994 		    const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
    995 		    register_t* t_reg_addr = &tf->tf_fixreg[t_reg];
    996 		    register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
    997 
    998 		    if (copyin((void *)ea_addr,
    999 			       t_reg_addr,
   1000 			       sizeof(uint32_t)) != 0)
   1001 			return -1;
   1002 
   1003 		    if (dsi->flags & DSI_OP_UPDATE) {
   1004 			/* this is valid for 601, but to simplify logic don't pass for any */
   1005 			if (a_reg == 0)
   1006 			    return -1;
   1007 			else
   1008 			    *a_reg_addr = ea_addr;
   1009 		    }
   1010 
   1011 		    if (dsi->flags & DSI_OP_INDEXED) {
   1012 			/* do nothing , indexed address already in ea */
   1013 		    }
   1014 
   1015 		    if (dsi->flags & DSI_OP_ZERO) {
   1016 			/* XXX - 64bit clear upper word */
   1017 		    }
   1018 
   1019 		    if (dsi->flags & DSI_OP_REVERSED) {
   1020 			/* reverse  bytes */
   1021 			register_t temp = bswap32(*t_reg_addr);
   1022 			*t_reg_addr = temp;
   1023 		    }
   1024 
   1025 		    return 0;
   1026 		}
   1027 		break;
   1028 
   1029 	case EXC_ALI_STW:
   1030 	case EXC_ALI_STWU:
   1031 	case EXC_ALI_STWX:
   1032 	case EXC_ALI_STWUX:
   1033 	case EXC_ALI_STWBRX:
   1034 		{
   1035 		    const register_t ea_addr = tf->tf_dar;
   1036 		    const unsigned int s_reg = EXC_ALI_RST(tf->tf_dsisr);
   1037 		    const unsigned int a_reg = EXC_ALI_RA(tf->tf_dsisr);
   1038 		    register_t* s_reg_addr = &tf->tf_fixreg[s_reg];
   1039 		    register_t* a_reg_addr = &tf->tf_fixreg[a_reg];
   1040 
   1041 		    if (dsi->flags & DSI_OP_REVERSED) {
   1042 			/* byte-reversed write out */
   1043 			register_t temp = bswap32(*s_reg_addr);
   1044 
   1045 			if (copyout(&temp,
   1046 				    (void *)ea_addr,
   1047 				    sizeof(uint32_t)) != 0)
   1048 			    return -1;
   1049 		    }
   1050 		    /* write out word */
   1051 		    else if (copyout(s_reg_addr,
   1052 				     (void *)ea_addr,
   1053 				     sizeof(uint32_t)) != 0)
   1054 			return -1;
   1055 
   1056 		    if (dsi->flags & DSI_OP_INDEXED) {
   1057 			/* do nothing, indexed address already in ea */
   1058 		    }
   1059 
   1060 		    if (dsi->flags & DSI_OP_UPDATE) {
   1061 			/* this is valid for 601, but to simplify logic don't pass for any */
   1062 			if (a_reg == 0)
   1063 			    return -1;
   1064 			else
   1065 			    *a_reg_addr = ea_addr;
   1066 		    }
   1067 
   1068 		    return 0;
   1069 		}
   1070 		break;
   1071 	}
   1072 
   1073 	return -1;
   1074 }
   1075 
   1076 static int
   1077 emulate_privileged(struct lwp *l, struct trapframe *tf)
   1078 {
   1079 	uint32_t opcode;
   1080 
   1081 	if (copyin((void *)tf->tf_srr0, &opcode, sizeof(opcode)) != 0)
   1082 		return 0;
   1083 
   1084 	if (OPC_MFSPR_P(opcode, SPR_PVR)) {
   1085 		__asm ("mfpvr %0" : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)]));
   1086 		return 1;
   1087 	}
   1088 
   1089 	return emulate_mxmsr(l, tf, opcode);
   1090 }
   1091 
/*
 * Copy a NUL-terminated string from user space (udaddr) to kernel space
 * (kaddr), at most len bytes including the terminator.  If done is
 * non-NULL it is set to the number of bytes copied (including the NUL).
 * Returns 0 on success, ENAMETOOLONG if len was exhausted before a NUL
 * was found, or the fault code from setfault() on a user-space fault.
 *
 * NOTE(review): on the fault path (out2) *done is left untouched —
 * callers must not rely on it after a non-zero return; confirm this
 * matches the copyinstr(9) contract expected by consumers.
 */
int
copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
{
	vaddr_t uva = (vaddr_t) udaddr;
	char *kp = kaddr;
	struct faultbuf env;
	int rv;

	/* Arm the on-fault handler; non-zero means a fault bounced us here. */
	if ((rv = setfault(&env)) != 0) {
		unsetusr();
		goto out2;
	}

	while (len > 0) {
		size_t seglen;
		/*
		 * setusr() presumably maps the user segment containing uva
		 * into a kernel-accessible window, returning the mapped
		 * address and how many contiguous bytes (seglen) are valid.
		 */
		vaddr_t p = setusr(uva, &seglen);
		if (seglen > len)
			seglen = len;
		len -= seglen;
		uva += seglen;
		/* Copy byte-by-byte, stopping at the string terminator. */
		for (; seglen-- > 0; p++) {
			if ((*kp++ = *(char *)p) == 0) {
				unsetusr();
				goto out;
			}
		}
		unsetusr();
	}
	/* Ran out of len before finding the NUL terminator. */
	rv = ENAMETOOLONG;

 out:
	if (done != NULL)
		*done = kp - (char *) kaddr;
 out2:
	/* Disarm the fault handler before returning. */
	curpcb->pcb_onfault = 0;
	return rv;
}
   1129 
   1130 
/*
 * Copy a NUL-terminated string from kernel space (kaddr) to user space
 * (udaddr), at most len bytes including the terminator.  If done is
 * non-NULL it is set to the number of bytes copied (including the NUL).
 * Returns 0 on success, ENAMETOOLONG if len was exhausted before the
 * NUL was written, or the fault code from setfault() on a user-space
 * fault.
 *
 * NOTE(review): as with copyinstr(), *done is not updated on the fault
 * path (out2) — verify callers tolerate that.
 */
int
copyoutstr(const void *kaddr, void *udaddr, size_t len, size_t *done)
{
	const char *kp = kaddr;
	vaddr_t uva = (vaddr_t) udaddr;
	struct faultbuf env;
	int rv;

	/* Arm the on-fault handler; non-zero means a fault bounced us here. */
	if ((rv = setfault(&env)) != 0) {
		unsetusr();
		goto out2;
	}

	while (len > 0) {
		size_t seglen;
		/*
		 * setusr() presumably maps the user segment containing uva
		 * into a kernel-accessible window, returning the mapped
		 * address and how many contiguous bytes (seglen) are valid.
		 */
		vaddr_t p = setusr(uva, &seglen);
		if (seglen > len)
			seglen = len;
		len -= seglen;
		uva += seglen;
		/* Copy byte-by-byte, stopping after writing the NUL. */
		for (; seglen-- > 0; p++) {
			if ((*(char *)p = *kp++) == 0) {
				unsetusr();
				goto out;
			}
		}
		unsetusr();
	}
	/* Ran out of len before writing the NUL terminator. */
	rv = ENAMETOOLONG;

 out:
	if (done != NULL)
		*done = kp - (const char*)kaddr;
 out2:
	/* Disarm the fault handler before returning. */
	curpcb->pcb_onfault = 0;
	return rv;
}
   1168 
   1169 const struct dsi_info*
   1170 get_dsi_info(register_t dsisr)
   1171 {
   1172     static const struct dsi_info dsi[] =
   1173 	{
   1174 	    /* data cache block zero */
   1175 	    {EXC_ALI_DCBZ, 0},
   1176 
   1177 	    /* load halfwords */
   1178 	    {EXC_ALI_LHZ,   DSI_OP_ZERO},
   1179 	    {EXC_ALI_LHZU,  DSI_OP_ZERO|DSI_OP_UPDATE},
   1180 	    {EXC_ALI_LHZX,  DSI_OP_ZERO|DSI_OP_INDEXED},
   1181 	    {EXC_ALI_LHZUX, DSI_OP_ZERO|DSI_OP_UPDATE|DSI_OP_INDEXED},
   1182 	    {EXC_ALI_LHA,   DSI_OP_ALGEBRAIC},
   1183 	    {EXC_ALI_LHAU,  DSI_OP_ALGEBRAIC|DSI_OP_UPDATE},
   1184 	    {EXC_ALI_LHAX,  DSI_OP_ALGEBRAIC|DSI_OP_INDEXED},
   1185 	    {EXC_ALI_LHAUX, DSI_OP_ALGEBRAIC|DSI_OP_UPDATE|DSI_OP_INDEXED},
   1186 
   1187 	    /* store halfwords */
   1188 	    {EXC_ALI_STH,   0},
   1189 	    {EXC_ALI_STHU,  DSI_OP_UPDATE},
   1190 	    {EXC_ALI_STHX,  DSI_OP_INDEXED},
   1191 	    {EXC_ALI_STHUX, DSI_OP_UPDATE|DSI_OP_INDEXED},
   1192 
   1193 	    /* load words */
   1194 	    {EXC_ALI_LWARX_LWZ, DSI_OP_ZERO},
   1195 	    {EXC_ALI_LWZU,      DSI_OP_ZERO|DSI_OP_UPDATE},
   1196 	    {EXC_ALI_LWZX,      DSI_OP_ZERO|DSI_OP_INDEXED},
   1197 	    {EXC_ALI_LWZUX,     DSI_OP_ZERO|DSI_OP_UPDATE|DSI_OP_INDEXED},
   1198 
   1199 	    /* store words */
   1200 	    {EXC_ALI_STW,   0},
   1201 	    {EXC_ALI_STWU,  DSI_OP_UPDATE},
   1202 	    {EXC_ALI_STWX,  DSI_OP_INDEXED},
   1203 	    {EXC_ALI_STWUX, DSI_OP_UPDATE|DSI_OP_INDEXED},
   1204 
   1205 	    /* load byte-reversed */
   1206 	    {EXC_ALI_LHBRX, DSI_OP_REVERSED|DSI_OP_INDEXED|DSI_OP_ZERO},
   1207 	    {EXC_ALI_LWBRX, DSI_OP_REVERSED|DSI_OP_INDEXED},
   1208 
   1209 	    /* store byte-reversed */
   1210 	    {EXC_ALI_STHBRX, DSI_OP_REVERSED|DSI_OP_INDEXED},
   1211 	    {EXC_ALI_STWBRX, DSI_OP_REVERSED|DSI_OP_INDEXED},
   1212 
   1213 	    /* load float double-precision */
   1214 	    {EXC_ALI_LFD,   0},
   1215 	    {EXC_ALI_LFDU,  DSI_OP_UPDATE},
   1216 	    {EXC_ALI_LDFX,  DSI_OP_INDEXED},
   1217 	    {EXC_ALI_LFDUX, DSI_OP_UPDATE|DSI_OP_INDEXED},
   1218 
   1219 	    /* store float double precision */
   1220 	    {EXC_ALI_STFD,   0},
   1221 	    {EXC_ALI_STFDU,  DSI_OP_UPDATE},
   1222 	    {EXC_ALI_STFDX,  DSI_OP_INDEXED},
   1223 	    {EXC_ALI_STFDUX, DSI_OP_UPDATE|DSI_OP_INDEXED},
   1224 
   1225 	    /* XXX - ones below here not yet implemented in fix_unaligned() */
   1226 	    /* load float single precision */
   1227 	    {EXC_ALI_LFS,   0},
   1228 	    {EXC_ALI_LFSU,  DSI_OP_UPDATE},
   1229 	    {EXC_ALI_LSFX,  DSI_OP_INDEXED},
   1230 	    {EXC_ALI_LFSUX, DSI_OP_UPDATE|DSI_OP_INDEXED},
   1231 
   1232 	    /* store float single precision */
   1233 	    {EXC_ALI_STFS,   0},
   1234 	    {EXC_ALI_STFSU,  DSI_OP_UPDATE},
   1235 	    {EXC_ALI_STFSX,  DSI_OP_INDEXED},
   1236 	    {EXC_ALI_STFSUX, DSI_OP_UPDATE|DSI_OP_INDEXED},
   1237 
   1238 	    /* multiple */
   1239 	    {EXC_ALI_LMW,  0},
   1240 	    {EXC_ALI_STMW, 0},
   1241 
   1242 	    /* load & store string */
   1243 	    {EXC_ALI_LSWI, 0},
   1244 	    {EXC_ALI_LSWX, DSI_OP_INDEXED},
   1245 	    {EXC_ALI_STSWI, 0},
   1246 	    {EXC_ALI_STSWX, DSI_OP_INDEXED},
   1247 
   1248 	    /* get/send word from external */
   1249 	    {EXC_ALI_ECIWX, DSI_OP_INDEXED},
   1250 	    {EXC_ALI_ECOWX, DSI_OP_INDEXED},
   1251 
   1252 	    /* store float as integer word */
   1253 	    {EXC_ALI_STFIWX, 0},
   1254 
   1255 	    /* store conditional */
   1256 	    {EXC_ALI_LDARX, DSI_OP_INDEXED}, /* stdcx */
   1257 	    {EXC_ALI_STDCX, DSI_OP_INDEXED},
   1258 	    {EXC_ALI_STWCX, DSI_OP_INDEXED},  /* lwarx */
   1259 
   1260 #ifdef PPC_OEA64
   1261 	    /* 64 bit, load word algebriac */
   1262 	    {EXC_ALI_LWAX,  DSI_OP_ALGEBRAIC|DSI_OP_INDEXED},
   1263 	    {EXC_ALI_LWAUX, DSI_OP_ALGEBRAIC|DSI_OP_UPDATE|DSI_OP_INDEXED},
   1264 
   1265 	    /* 64 bit load doubleword */
   1266 	    {EXC_ALI_LD_LDU_LWA, 0},
   1267 	    {EXC_ALI_LDX,        DSI_OP_INDEXED},
   1268 	    {EXC_ALI_LDUX,       DSI_OP_UPDATE|DSI_OP_INDEXED},
   1269 
   1270 	    /* 64 bit store double word */
   1271 	    {EXC_ALI_STD_STDU, 0},
   1272 	    {EXC_ALI_STDX,     DSI_OP_INDEXED},
   1273 	    {EXC_ALI_STDUX,    DSI_OP_UPDATE|DSI_OP_INDEXED},
   1274 #endif
   1275 	};
   1276 
   1277     int num_elems = sizeof(dsi)/sizeof(dsi[0]);
   1278     int indicator = EXC_ALI_OPCODE_INDICATOR(dsisr);
   1279     int i;
   1280 
   1281     for (i = 0 ; i < num_elems; i++) {
   1282 	if (indicator == dsi[i].indicator){
   1283 	    return &dsi[i];
   1284 	}
   1285     }
   1286     return 0;
   1287 }
   1288