/*	$NetBSD: hppa_machdep.c,v 1.33 2022/05/13 18:40:02 skrll Exp $	*/

/*-
 * Copyright (c) 1997, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hppa_machdep.c,v 1.33 2022/05/13 18:40:02 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/ras.h>
#include <sys/cpu.h>

#include <sys/kernel.h>

#include <uvm/uvm_extern.h>

#include <machine/cpufunc.h>
#include <machine/pcb.h>
#include <machine/mcontext.h>
#include <hppa/hppa/machdep.h>

/* the following is used externally (sysctl_hw) */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

/*
 * XXX fredette - much of the TLB trap handler setup should
 * probably be moved here from hppa/hppa/machdep.c, seeing
 * that there's related code already in hppa/hppa/trap.S.
 */

void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	struct pcb *pcb = lwp_getpcb(l);
	__greg_t *gr = mcp->__gregs;
	__greg_t ras_pc;

	gr[0] = tf->tf_ipsw;
	gr[1] = tf->tf_r1;
	gr[2] = tf->tf_rp;
	gr[3] = tf->tf_r3;
	gr[4] = tf->tf_r4;
	gr[5] = tf->tf_r5;
	gr[6] = tf->tf_r6;
	gr[7] = tf->tf_r7;
	gr[8] = tf->tf_r8;
	gr[9] = tf->tf_r9;
	gr[10] = tf->tf_r10;
	gr[11] = tf->tf_r11;
	gr[12] = tf->tf_r12;
	gr[13] = tf->tf_r13;
	gr[14] = tf->tf_r14;
	gr[15] = tf->tf_r15;
	gr[16] = tf->tf_r16;
	gr[17] = tf->tf_r17;
	gr[18] = tf->tf_r18;
	gr[19] = tf->tf_t4;
	gr[20] = tf->tf_t3;
	gr[21] = tf->tf_t2;
	gr[22] = tf->tf_t1;
	gr[23] = tf->tf_arg3;
	gr[24] = tf->tf_arg2;
	gr[25] = tf->tf_arg1;
	gr[26] = tf->tf_arg0;
	gr[27] = tf->tf_dp;
	gr[28] = tf->tf_ret0;
	gr[29] = tf->tf_ret1;
	gr[30] = tf->tf_sp;
	gr[31] = tf->tf_r31;

	gr[_REG_SAR] = tf->tf_sar;
	gr[_REG_PCSQH] = tf->tf_iisq_head;
	gr[_REG_PCSQT] = tf->tf_iisq_tail;
	gr[_REG_PCOQH] = tf->tf_iioq_head;
	gr[_REG_PCOQT] = tf->tf_iioq_tail;
	gr[_REG_SR0] = tf->tf_sr0;
	gr[_REG_SR1] = tf->tf_sr1;
	gr[_REG_SR2] = tf->tf_sr2;
	gr[_REG_SR3] = tf->tf_sr3;
	gr[_REG_SR4] = tf->tf_sr4;
	gr[_REG_CR27] = tf->tf_cr27;
#if 0
	gr[_REG_CR26] = tf->tf_cr26;
#endif

	ras_pc = (__greg_t)ras_lookup(l->l_proc,
	    (void *)(gr[_REG_PCOQH] & ~HPPA_PC_PRIV_MASK));
	if (ras_pc != -1) {
		ras_pc |= HPPA_PC_PRIV_USER;
		gr[_REG_PCOQH] = ras_pc;
		gr[_REG_PCOQT] = ras_pc + 4;
	}

	*flags |= _UC_CPU | _UC_TLSBASE;

	if (l->l_md.md_flags & 0) {
		return;
	}

	hppa_fpu_flush(l);
	memcpy(&mcp->__fpregs, pcb->pcb_fpregs, sizeof(mcp->__fpregs));
	*flags |= _UC_FPU;
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	const __greg_t *gr = mcp->__gregs;

	if ((gr[_REG_PSW] & (PSW_MBS|PSW_MBZ)) != PSW_MBS) {
		return EINVAL;
	}

#if 0
	/*
	 * XXX
	 * Force the space regs and privilege bits to
	 * the right values in the trapframe for now.
	 */

	if (gr[_REG_PCSQH] != pmap_sid(pmap, gr[_REG_PCOQH])) {
		return EINVAL;
	}

	if (gr[_REG_PCSQT] != pmap_sid(pmap, gr[_REG_PCOQT])) {
		return EINVAL;
	}

	if (gr[_REG_PCOQH] < 0xc0000020 &&
	    (gr[_REG_PCOQH] & HPPA_PC_PRIV_MASK) != HPPA_PC_PRIV_USER) {
		return EINVAL;
	}

	if (gr[_REG_PCOQT] < 0xc0000020 &&
	    (gr[_REG_PCOQT] & HPPA_PC_PRIV_MASK) != HPPA_PC_PRIV_USER) {
		return EINVAL;
	}
#endif

	return 0;
}

int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	struct proc *p = l->l_proc;
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
	const __greg_t *gr = mcp->__gregs;
	int error;

	if ((flags & _UC_CPU) != 0) {
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

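		/*
		 * The user-supplied PSW has already been sanity checked
		 * by cpu_mcontext_validate() above; on PA2.0 CPUs PSW_O
		 * is forced on in addition to the validated bits.
		 */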
		tf->tf_ipsw = gr[0] |
		    (hppa_cpu_ispa20_p() ? PSW_O : 0);
		tf->tf_r1 = gr[1];
		tf->tf_rp = gr[2];
		tf->tf_r3 = gr[3];
		tf->tf_r4 = gr[4];
		tf->tf_r5 = gr[5];
		tf->tf_r6 = gr[6];
		tf->tf_r7 = gr[7];
		tf->tf_r8 = gr[8];
		tf->tf_r9 = gr[9];
		tf->tf_r10 = gr[10];
		tf->tf_r11 = gr[11];
		tf->tf_r12 = gr[12];
		tf->tf_r13 = gr[13];
		tf->tf_r14 = gr[14];
		tf->tf_r15 = gr[15];
		tf->tf_r16 = gr[16];
		tf->tf_r17 = gr[17];
		tf->tf_r18 = gr[18];
		tf->tf_t4 = gr[19];
		tf->tf_t3 = gr[20];
		tf->tf_t2 = gr[21];
		tf->tf_t1 = gr[22];
		tf->tf_arg3 = gr[23];
		tf->tf_arg2 = gr[24];
		tf->tf_arg1 = gr[25];
		tf->tf_arg0 = gr[26];
		tf->tf_dp = gr[27];
		tf->tf_ret0 = gr[28];
		tf->tf_ret1 = gr[29];
		tf->tf_sp = gr[30];
		tf->tf_r31 = gr[31];
		tf->tf_sar = gr[_REG_SAR];
		tf->tf_iisq_head = pmap_sid(pmap, gr[_REG_PCOQH]);
		tf->tf_iisq_tail = pmap_sid(pmap, gr[_REG_PCOQT]);

		tf->tf_iioq_head = gr[_REG_PCOQH];
		tf->tf_iioq_tail = gr[_REG_PCOQT];

		if (tf->tf_iioq_head >= 0xc0000020) {
			tf->tf_iioq_head &= ~HPPA_PC_PRIV_MASK;
		} else {
			tf->tf_iioq_head |= HPPA_PC_PRIV_USER;
		}
		if (tf->tf_iioq_tail >= 0xc0000020) {
			tf->tf_iioq_tail &= ~HPPA_PC_PRIV_MASK;
		} else {
			tf->tf_iioq_tail |= HPPA_PC_PRIV_USER;
		}

#if 0
		tf->tf_sr0 = gr[_REG_SR0];
		tf->tf_sr1 = gr[_REG_SR1];
		tf->tf_sr2 = gr[_REG_SR2];
		tf->tf_sr3 = gr[_REG_SR3];
		tf->tf_sr4 = gr[_REG_SR4];
		tf->tf_cr26 = gr[_REG_CR26];
#endif
	}

	/* Restore the private thread context */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_CR27]);
		tf->tf_cr27 = gr[_REG_CR27];
	}

	/* Restore the floating point registers */
	if ((flags & _UC_FPU) != 0) {
		struct pcb *pcb = lwp_getpcb(l);

		hppa_fpu_flush(l);
		memcpy(pcb->pcb_fpregs, &mcp->__fpregs, sizeof(mcp->__fpregs));
	}

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return 0;
}

/*
 * Do RAS processing.
 */

void
hppa_ras(struct lwp *l)
{
	struct proc *p;
	struct trapframe *tf;
	intptr_t rasaddr;

	p = l->l_proc;
	tf = l->l_md.md_regs;

	rasaddr = (intptr_t)ras_lookup(p,
	    (void *)(tf->tf_iioq_head & ~HPPA_PC_PRIV_MASK));
	if (rasaddr != -1) {
		rasaddr |= HPPA_PC_PRIV_USER;
		tf->tf_iioq_head = rasaddr;
		tf->tf_iioq_tail = rasaddr + 4;
	}
}

/*
 * Preempt the current LWP if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void
cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
{

	if ((flags & RESCHED_REMOTE) != 0) {
#ifdef MULTIPROCESSOR
		/* XXX send IPI */
#endif
	} else {
		setsoftast(l);
	}
}

#ifdef MODULAR
struct lwp *
hppa_curlwp(void)
{
	return curlwp;
}

struct cpu_info *
hppa_curcpu(void)
{
	return curcpu();
}
#endif