/*	$NetBSD: trap.c,v 1.102 2023/10/05 19:41:05 ad Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define	__UFETCHSTORE_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.102 2023/10/05 19:41:05 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_ppcarch.h"
#include "opt_ppcopts.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>

#if defined(KGDB)
#include <sys/kgdb.h>
#endif

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/db_machdep.h>
#include <powerpc/spr.h>
#include <powerpc/userret.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/spr.h>
#include <powerpc/ibm4xx/tlb.h>

#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else		XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((void *)((int)(sp) + 8)) /* more args go here */

void trap(struct trapframe *);	/* Called from locore / trap_subr */
#if 0
/* Not currently used nor exposed externally in any header file */
int badaddr(void *, size_t);
int badaddr_read(void *, size_t, int *);
#endif
int ctx_setup(int, int);

#ifndef PPC_NO_UNALIGNED
static bool fix_unaligned(struct trapframe *, ksiginfo_t *);
#endif

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
#else
#define DBPRINTF(x, y)
#endif

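/*
 * Common trap handler, entered from trap_subr in locore with the
 * trapframe already set up.  EXC_USER is or'ed into the exception type
 * when the trap was taken from user mode (PSL_PR set in SRR1).  Each
 * case either resolves the fault and returns, posts a signal to the
 * current LWP, or falls into brain_damage and panics; user-mode traps
 * finish through userret() at the bottom.
 */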
void
trap(struct trapframe *tf)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct pcb *pcb;
	int type = tf->tf_exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l->l_stat == LSONPROC);

	if (tf->tf_srr1 & PSL_PR) {
		type |= EXC_USER;
	}

	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, tf->tf_srr0, tf, &tf));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		/* We don't use hardware breakpoints for userland. */
		goto brain_damage;

	case EXC_TRC|EXC_USER:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		trapsignal(l, &ksi);
		break;

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
	{
		struct vm_map *map;
		vaddr_t va;
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		fb = pcb->pcb_onfault;

		if (curcpu()->ci_idepth >= 0) {
			rv = EFAULT;
			goto out;
		}

		va = tf->tf_dear;
		if (tf->tf_pid == KERNEL_PID) {
			map = kernel_map;
		} else {
			map = &p->p_vmspace->vm_map;
		}

		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
		    tf->tf_srr0,
		    (ftype & VM_PROT_WRITE) ? "write" : "read",
		    (void *)va, tf->tf_esr));

		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (rv == 0)
			return;
out:
		if (fb != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
			tf->tf_cr = fb->fb_cr;
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = rv;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	}
	goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    tf->tf_srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    tf->tf_dear, tf->tf_esr));
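		/*
		 * Hand the user-mode data fault to uvm_fault(); if it
		 * cannot be resolved, the error code is translated
		 * into a signal at the vm_signal label below.
		 */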
"write" : "read", 236 1.58 matt tf->tf_dear, tf->tf_esr)); 237 1.13 simonb KASSERT(l == curlwp && (l->l_stat == LSONPROC)); 238 1.55 chs // KASSERT(curpcb->pcb_onfault == NULL); 239 1.58 matt rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_dear), 240 1.32 drochner ftype); 241 1.1 simonb if (rv == 0) { 242 1.13 simonb break; 243 1.1 simonb } 244 1.19 thorpej KSI_INIT_TRAP(&ksi); 245 1.17 matt ksi.ksi_trap = EXC_DSI; 246 1.58 matt ksi.ksi_addr = (void *)tf->tf_dear; 247 1.80 rin vm_signal: 248 1.80 rin switch (rv) { 249 1.80 rin case EINVAL: 250 1.80 rin ksi.ksi_signo = SIGBUS; 251 1.80 rin ksi.ksi_code = BUS_ADRERR; 252 1.80 rin break; 253 1.80 rin case EACCES: 254 1.80 rin ksi.ksi_signo = SIGSEGV; 255 1.80 rin ksi.ksi_code = SEGV_ACCERR; 256 1.80 rin break; 257 1.80 rin case ENOMEM: 258 1.17 matt ksi.ksi_signo = SIGKILL; 259 1.80 rin printf("UVM: pid %d.%d (%s), uid %d killed: " 260 1.80 rin "out of swap\n", p->p_pid, l->l_lid, p->p_comm, 261 1.80 rin l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); 262 1.80 rin break; 263 1.80 rin default: 264 1.80 rin ksi.ksi_signo = SIGSEGV; 265 1.80 rin ksi.ksi_code = SEGV_MAPERR; 266 1.80 rin break; 267 1.1 simonb } 268 1.17 matt trapsignal(l, &ksi); 269 1.1 simonb break; 270 1.15 chs 271 1.1 simonb case EXC_ITMISS|EXC_USER: 272 1.1 simonb case EXC_ISI|EXC_USER: 273 1.15 chs ftype = VM_PROT_EXECUTE; 274 1.13 simonb DBPRINTF(TDB_ALL, 275 1.15 chs ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n", 276 1.58 matt tf->tf_srr0, tf)); 277 1.55 chs // KASSERT(curpcb->pcb_onfault == NULL); 278 1.58 matt rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_srr0), 279 1.32 drochner ftype); 280 1.1 simonb if (rv == 0) { 281 1.13 simonb break; 282 1.1 simonb } 283 1.85 rin isi: 284 1.19 thorpej KSI_INIT_TRAP(&ksi); 285 1.17 matt ksi.ksi_trap = EXC_ISI; 286 1.58 matt ksi.ksi_addr = (void *)tf->tf_srr0; 287 1.80 rin goto vm_signal; 288 1.1 simonb break; 289 1.1 simonb 290 1.1 simonb case EXC_AST|EXC_USER: 291 1.62 matt cpu_ast(l, curcpu()); 292 1.1 simonb break; 293 1.1 simonb 294 1.1 simonb case EXC_ALI|EXC_USER: 295 1.87 rin if (fix_unaligned(tf, &ksi)) 296 1.17 matt trapsignal(l, &ksi); 297 1.1 simonb break; 298 1.1 simonb 299 1.1 simonb case EXC_PGM|EXC_USER: 300 1.57 matt curcpu()->ci_data.cpu_ntrap++; 301 1.53 rmind 302 1.84 rin KSI_INIT_TRAP(&ksi); 303 1.84 rin ksi.ksi_trap = EXC_PGM; 304 1.84 rin ksi.ksi_addr = (void *)tf->tf_srr0; 305 1.1 simonb 306 1.84 rin if (tf->tf_esr & ESR_PTR) { 307 1.86 rin vaddr_t va; 308 1.84 rin sigtrap: 309 1.86 rin va = (vaddr_t)tf->tf_srr0; 310 1.86 rin /* 311 1.86 rin * Restore original instruction and clear BP. 312 1.86 rin */ 313 1.86 rin if (p->p_md.md_ss_addr[0] == va || 314 1.86 rin p->p_md.md_ss_addr[1] == va) { 315 1.86 rin rv = ppc_sstep(l, 0); 316 1.86 rin if (rv != 0) 317 1.86 rin goto vm_signal; 318 1.86 rin ksi.ksi_code = TRAP_TRACE; 319 1.86 rin } else 320 1.86 rin ksi.ksi_code = TRAP_BRKPT; 321 1.84 rin if (p->p_raslist != NULL && 322 1.86 rin ras_lookup(p, (void *)va) != (void *)-1) { 323 1.86 rin tf->tf_srr0 += (ksi.ksi_code == TRAP_TRACE) ? 
				    0 : 4;
				break;
			}
			ksi.ksi_signo = SIGTRAP;
		} else if (tf->tf_esr & ESR_PPR) {
			uint32_t opcode;

			rv = copyin((void *)tf->tf_srr0, &opcode,
			    sizeof(opcode));
			if (rv)
				goto isi;
			if (emulate_mxmsr(l, tf, opcode)) {
				tf->tf_srr0 += 4;
				break;
			}

			ksi.ksi_code = ILL_PRVOPC;
			ksi.ksi_signo = SIGILL;
		} else {
			pcb = lwp_getpcb(l);

			if (__predict_false(!fpu_used_p(l))) {
				memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
				fpu_mark_used(l);
			}

			if (fpu_emulate(tf, &pcb->pcb_fpu, &ksi)) {
				if (ksi.ksi_signo == 0)	/* was emulated */
					break;
				else if (ksi.ksi_signo == SIGTRAP)
					goto sigtrap;	/* XXX H/W bug? */
			} else {
				ksi.ksi_code = ILL_ILLOPC;
				ksi.ksi_signo = SIGILL;
			}
		}

		trapsignal(l, &ksi);
		break;

	case EXC_MCHK:
	{
		struct faultbuf *fb;

		pcb = lwp_getpcb(l);
		if ((fb = pcb->pcb_onfault) != NULL) {
			tf->tf_pid = KERNEL_PID;
			tf->tf_srr0 = fb->fb_pc;
			tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
			tf->tf_fixreg[1] = fb->fb_sp;
			tf->tf_fixreg[2] = fb->fb_r2;
			tf->tf_fixreg[3] = 1; /* Return TRUE */
			tf->tf_cr = fb->fb_cr;
			memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
			    sizeof(fb->fb_fixreg));
			return;
		}
	}
	goto brain_damage;

	default:
brain_damage:
		printf("trap type 0x%x at 0x%lx\n", type, tf->tf_srr0);
#if defined(DDB) || defined(KGDB)
		if (kdb_trap(type, tf))
			return;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke powerpc userret code */
	userret(l, tf);
}

int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			ctx_alloc(__UNVOLATILE(pm));
		}
		ctx = pm->pm_ctx;
	} else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}

/*
 * Used by copyin()/copyout()
 */
extern vaddr_t vmaprange(struct proc *, vaddr_t, vsize_t, int);
extern void vunmaprange(vaddr_t, vsize_t);
static int bigcopyin(const void *, void *, size_t);
static int bigcopyout(const void *, void *, size_t);

#ifdef __clang__
#pragma clang optimize off
#endif
int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 1024)
		return (bigcopyin(uaddr, kaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	__asm volatile (
		"mfmsr %[msr];"			/* Save MSR */
		"li %[tmp],0x20;"		/* Disable IMMU */
		"andc %[tmp],%[msr],%[tmp];"
		"mtmsr %[tmp];"
		"isync;"
		MFPID(%[pid])			/* Save old PID */

		"srwi. %[tmp],%[len],0x2;"	/* How many words? */
		"beq- 2f;"			/* No words. Go do bytes */
		"mtctr %[tmp];"

	"1:"	MTPID(%[ctx])
		"isync;"
#ifdef PPC_IBM403
		"lswi %[tmp],%[uaddr],4;"	/* Load user word */
#else
		"lwz %[tmp],0(%[uaddr]);"
#endif
		"addi %[uaddr],%[uaddr],0x4;"	/* next uaddr word */
		"sync;"

		MTPID(%[pid])
		"isync;"
#ifdef PPC_IBM403
		"stswi %[tmp],%[kaddr],4;"	/* Store kernel word */
#else
		"stw %[tmp],0(%[kaddr]);"
#endif
		"addi %[kaddr],%[kaddr],0x4;"	/* next kaddr word */
		"sync;"
		"bdnz 1b;"			/* repeat */

	"2:"	"andi. %[tmp],%[len],0x3;"	/* How many remaining bytes? */
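		/*
		 * The 1-3 leftover bytes are moved with the string
		 * instructions: mtxer loads the byte count into XER's
		 * string-count field, which lswx/stswx below use as
		 * their transfer length.
		 */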
		"beq 10f;"
		"mtxer %[tmp];"

		MTPID(%[ctx])
		"isync;"
		"lswx %[tmp],0,%[uaddr];"	/* Load user bytes */
		"sync;"

		MTPID(%[pid])
		"isync;"
		"stswx %[tmp],0,%[kaddr];"	/* Store kernel bytes */
		"sync;"

	"10:"	"mtmsr %[msr];"			/* Restore MSR */
		"isync;"

		: [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
		: [uaddr] "b" (uaddr), [kaddr] "b" (kaddr),
		  [ctx] "r" (ctx), [len] "r" (len)
		: "cr0", "ctr", "xer");

	curpcb->pcb_onfault = NULL;
	return 0;
}
#ifdef __clang__
#pragma clang optimize on
#endif

static int
bigcopyin(const void *uaddr, void *kaddr, size_t len)
{
	const char *up;
	char *kp = kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, __UNCONST(uaddr), len, VM_PROT_READ);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)uaddr, len, VM_PROT_READ);

	if ((error = setfault(&env)) == 0) {
		memcpy(kp, up, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, __UNCONST(uaddr), len);

	return error;
}

#ifdef __clang__
#pragma clang optimize off
#endif
int
copyout(const void *kaddr, void *uaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 1024)
		return (bigcopyout(kaddr, uaddr, len));

	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	__asm volatile (
		"mfmsr %[msr];"			/* Save MSR */
		"li %[tmp],0x20;"		/* Disable IMMU */
		"andc %[tmp],%[msr],%[tmp];"
		"mtmsr %[tmp];"
		"isync;"
		MFPID(%[pid])			/* Save old PID */

		"srwi. %[tmp],%[len],0x2;"	/* How many words? */
		"beq- 2f;"			/* No words. Go do bytes */
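		/*
		 * The word loop below mirrors copyin(): each kernel
		 * word is loaded with the saved (kernel) PID in effect,
		 * then MTPID switches to the user context to store it,
		 * and switches back before the next iteration.
		 */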
		"mtctr %[tmp];"

	"1:"
#ifdef PPC_IBM403
		"lswi %[tmp],%[kaddr],4;"	/* Load kernel word */
#else
		"lwz %[tmp],0(%[kaddr]);"
#endif
		"addi %[kaddr],%[kaddr],0x4;"	/* next kaddr word */
		"sync;"

		MTPID(%[ctx])
		"isync;"
#ifdef PPC_IBM403
		"stswi %[tmp],%[uaddr],4;"	/* Store user word */
#else
		"stw %[tmp],0(%[uaddr]);"
#endif
		"addi %[uaddr],%[uaddr],0x4;"	/* next uaddr word */
		"sync;"

		MTPID(%[pid])
		"isync;"
		"bdnz 1b;"			/* repeat */

	"2:"	"andi. %[tmp],%[len],0x3;"	/* How many remaining bytes? */
		"beq 10f;"
		"mtxer %[tmp];"

		"lswx %[tmp],0,%[kaddr];"	/* Load kernel bytes */
		"sync;"

		MTPID(%[ctx])
		"isync;"
		"stswx %[tmp],0,%[uaddr];"	/* Store user bytes */
		"sync;"

		MTPID(%[pid])			/* Restore PID and MSR */
	"10:"	"mtmsr %[msr];"
		"isync;"

		: [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
		: [uaddr] "b" (uaddr), [kaddr] "b" (kaddr),
		  [ctx] "r" (ctx), [len] "r" (len)
		: "cr0", "ctr", "xer");

	curpcb->pcb_onfault = NULL;
	return 0;
}
#ifdef __clang__
#pragma clang optimize on
#endif

static int
bigcopyout(const void *kaddr, void *uaddr, size_t len)
{
	char *up;
	const char *kp = (const char *)kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, uaddr, len, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)uaddr, len,
	    VM_PROT_READ | VM_PROT_WRITE);

	if ((error = setfault(&env)) == 0) {
		memcpy(up, kp, len);
	}

	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, uaddr, len);

	return error;
}

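/*
 * Note on fault recovery (applies to copyin()/copyout() above as well):
 * setfault() records a struct faultbuf in curpcb->pcb_onfault.  If a
 * machine check or an unresolvable kernel data fault is taken while it
 * is set, trap() above resumes execution at the recorded PC with a
 * non-zero value in r3, so the setfault() call appears to return a
 * second time with an error and the copy is aborted.  Clearing
 * pcb_onfault afterwards is therefore required on every exit path.
 */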
/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct faultbuf env, *oldfault;
	int rv;

	oldfault = curpcb->pcb_onfault;
	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}

#if 0
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting. */
	__asm volatile ("sync; isync");

	if (setfault(&env)) {
		curpcb->pcb_onfault = NULL;
		__asm volatile ("sync");
		return 1;
	}

	__asm volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm volatile ("sync; isync");

	curpcb->pcb_onfault = NULL;
	__asm volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
#endif

#ifndef PPC_NO_UNALIGNED
static bool
fix_unaligned(struct trapframe *tf, ksiginfo_t *ksi)
{

	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGBUS;
	ksi->ksi_trap = EXC_ALI;
	ksi->ksi_addr = (void *)tf->tf_dear;
	return true;
}
#endif

/*
 * XXX Extremely lame implementations of _ufetch_* / _ustore_*.  IBM 4xx
 * experts should make versions that are good.
 */

#define UFETCH(sz)							\
int									\
_ufetch_ ## sz(const uint ## sz ## _t *uaddr, uint ## sz ## _t *valp)	\
{									\
	return copyin(uaddr, valp, sizeof(*valp));			\
}

UFETCH(8)
UFETCH(16)
UFETCH(32)

#define USTORE(sz)							\
int									\
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val)		\
{									\
	return copyout(&val, uaddr, sizeof(val));			\
}

USTORE(8)
USTORE(16)
USTORE(32)
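
/*
 * For reference, UFETCH(32) above expands to
 *
 *	int
 *	_ufetch_32(const uint32_t *uaddr, uint32_t *valp)
 *	{
 *		return copyin(uaddr, valp, sizeof(*valp));
 *	}
 *
 * i.e. the _ufetch_*()/_ustore_*() primitives simply defer to
 * copyin()/copyout() until tuned 4xx versions are written.
 */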