Home | History | Annotate | Line # | Download | only in powerpc
      1 /*	$NetBSD: fpu.c,v 1.42 2020/07/15 09:19:49 rin Exp $	*/
      2 
      3 /*
      4  * Copyright (C) 1996 Wolfgang Solfrank.
      5  * Copyright (C) 1996 TooLs GmbH.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by TooLs GmbH.
     19  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     20  *    derived from this software without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     25  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     28  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     29  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     30  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     31  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.42 2020/07/15 09:19:49 rin Exp $");
     36 
     37 #include <sys/param.h>
     38 #include <sys/proc.h>
     39 #include <sys/systm.h>
     40 #include <sys/atomic.h>
     41 #include <sys/siginfo.h>
     42 #include <sys/pcu.h>
     43 
     44 #include <machine/pcb.h>
     45 #include <machine/fpu.h>
     46 #include <machine/psl.h>
     47 
     48 static void fpu_state_load(lwp_t *, u_int);
     49 static void fpu_state_save(lwp_t *);
     50 static void fpu_state_release(lwp_t *);
     51 
/*
 * PCU (per-CPU unit) callbacks for the floating-point unit.  The MI
 * pcu(9) framework calls these to lazily load, save, and revoke the
 * FPU context of an lwp.
 */
const pcu_ops_t fpu_ops = {
	.pcu_id = PCU_FPU,
	.pcu_state_load = fpu_state_load,
	.pcu_state_save = fpu_state_save,
	.pcu_state_release = fpu_state_release,
};
     58 
     59 bool
     60 fpu_used_p(lwp_t *l)
     61 {
     62 	return pcu_valid_p(&fpu_ops, l);
     63 }
     64 
/*
 * Mark the lwp's FPU state as valid without preserving any previous
 * contents (pcu_discard with valid=true); used when the caller is
 * about to install a complete new register set.
 */
void
fpu_mark_used(lwp_t *l)
{
	pcu_discard(&fpu_ops, l, true);
}
     70 
/*
 * PCU backend: load the lwp's FPU context onto the current CPU.
 *
 * If the saved state was never valid (PCU_VALID clear), start from an
 * all-zero register image.  Unless the hardware still holds this lwp's
 * registers (PCU_REENABLE set), copy them from the PCB into the FPU
 * with external interrupts blocked.  Finally set PSL_FP (plus the
 * lwp's FP exception-mode bits PCB_FE0/PCB_FE1) in the user trapframe
 * so the lwp can execute FP instructions without re-faulting.
 */
void
fpu_state_load(lwp_t *l, u_int flags)
{
#ifdef PPC_HAVE_FPU
	struct pcb * const pcb = lwp_getpcb(l);

	/* First use: present a clean (all-zeroes) register set. */
	if ((flags & PCU_VALID) == 0) {
		memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
	}

	if ((flags & PCU_REENABLE) == 0) {
		/*
		 * Block interrupts and enable the FPU in the MSR while
		 * we touch the FP registers; isync/sync order the MSR
		 * update against the register loads.
		 */
		const register_t msr = mfmsr();
		mtmsr((msr & ~PSL_EE) | PSL_FP);
		__asm volatile ("isync");

		fpu_load_from_fpreg(&pcb->pcb_fpu);
		__asm volatile ("sync");

		mtmsr(msr);
		__asm volatile ("isync");
	}

	/* Count the FPU context switch for event statistics. */
	curcpu()->ci_ev_fpusw.ev_count++;
	l->l_md.md_utf->tf_srr1 |= PSL_FP|(pcb->pcb_flags & (PCB_FE0|PCB_FE1));
#endif
}
     97 
     98 /*
     99  * Save the contents of the current CPU's FPU to its PCB.
    100  */
    101 void
    102 fpu_state_save(lwp_t *l)
    103 {
    104 #ifdef PPC_HAVE_FPU
    105 	struct pcb * const pcb = lwp_getpcb(l);
    106 
    107 	const register_t msr = mfmsr();
    108         mtmsr((msr & ~PSL_EE) | PSL_FP);
    109 	__asm volatile ("isync");
    110 
    111 	fpu_unload_to_fpreg(&pcb->pcb_fpu);
    112 	__asm volatile ("sync");
    113 
    114 	mtmsr(msr);
    115 	__asm volatile ("isync");
    116 #endif
    117 }
    118 
    119 void
    120 fpu_state_release(lwp_t *l)
    121 {
    122 #ifdef PPC_HAVE_FPU
    123 	l->l_md.md_utf->tf_srr1 &= ~PSL_FP;
    124 #endif
    125 }
    126 
/*
 * FPSCR exception status ("sticky") bits and their corresponding
 * exception enable ("mask") bits.  In the low 32-bit word of the
 * FPSCR the sticky bits start STICKYSHIFT bits up from bit 0 and the
 * enables MASKSHIFT bits up, so shifting the enables left by
 * (STICKYSHIFT - MASKSHIFT) aligns each enable with its sticky bit;
 * fpu_get_fault_code() relies on this.
 */
#define	STICKYBITS	(FPSCR_VX|FPSCR_OX|FPSCR_UX|FPSCR_ZX|FPSCR_XX)
#define	STICKYSHIFT	25
#define	MASKBITS	(FPSCR_VE|FPSCR_OE|FPSCR_UE|FPSCR_ZE|FPSCR_XE)
#define	MASKSHIFT	3
    131 
/*
 * Determine the siginfo FPE_* code for the floating-point exception
 * just taken by curlwp.
 *
 * Reads the FPSCR, clears the exception-enable bits (VE/OE/UE/ZE/XE)
 * so the faulting operation will not immediately re-trap, then maps
 * the responsible sticky exception bit to an FPE_* code.  Returns 0
 * if no sticky exception bit is set.
 */
int
fpu_get_fault_code(void)
{
	lwp_t * const l = curlwp;
	struct pcb * const pcb = lwp_getpcb(l);
	uint64_t fpscr64;	/* FPSCR image before enables were cleared */
	uint32_t fpscr, ofpscr;
	int code;

#ifdef PPC_HAVE_FPU
	kpreempt_disable();

	struct cpu_info * const ci = curcpu();

	/*
	 * If we got preempted, we may be running on a different CPU.  So we
	 * need to check for that.
	 */
	KASSERT(fpu_used_p(l));
	if (__predict_true(l->l_pcu_cpu[PCU_FPU] == ci)) {
		/*
		 * Our state is still live in this CPU's FPU: read and
		 * update the FPSCR in place.  mffs/mtfsb0 work through
		 * an FP register, so f0 is saved and restored around
		 * the sequence.  The updated low FPSCR word is stored
		 * directly into pcb_fpu.fpscr.
		 */
		uint64_t tmp;
		const register_t msr = mfmsr();
		mtmsr((msr & ~PSL_EE) | PSL_FP);
		__asm volatile ("isync");
		__asm volatile (
			"stfd	0,0(%[tmp])\n"		/* save f0 */
			"mffs	0\n"			/* get FPSCR */
			"stfd	0,0(%[fpscr64])\n"	/* store a temp copy */
			"mtfsb0	0\n"			/* clear FPSCR_FX */
			"mtfsb0	24\n"			/* clear FPSCR_VE */
			"mtfsb0	25\n"			/* clear FPSCR_OE */
			"mtfsb0	26\n"			/* clear FPSCR_UE */
			"mtfsb0	27\n"			/* clear FPSCR_ZE */
			"mtfsb0	28\n"			/* clear FPSCR_XE */
			"mffs	0\n"			/* get FPSCR */
			"stfd	0,0(%[fpscr])\n"	/* store it */
			"lfd	0,0(%[tmp])\n"		/* restore f0 */
		    ::	[tmp] "b"(&tmp),
			[fpscr] "b"(&pcb->pcb_fpu.fpscr),
			[fpscr64] "b"(&fpscr64));
		mtmsr(msr);
		__asm volatile ("isync");
	} else {
		/*
		 * We got preempted to a different CPU so we need to save
		 * our FPU state.
		 */
		fpu_save(l);
		fpscr64 = *(uint64_t *)&pcb->pcb_fpu.fpscr;
		((uint32_t *)&pcb->pcb_fpu.fpscr)[_QUAD_LOWWORD] &= ~MASKBITS;
	}

	kpreempt_enable();
#else /* !PPC_HAVE_FPU */
	/* Emulated FPU: the state only ever lives in the PCB. */
	fpscr64 = *(uint64_t *)&pcb->pcb_fpu.fpscr;
	((uint32_t *)&pcb->pcb_fpu.fpscr)[_QUAD_LOWWORD] &= ~MASKBITS;
#endif

	/*
	 * Now determine the fault type.  First we test to see if any of sticky
	 * bits correspond to the enabled exceptions.  If so, we only test
	 * those bits.  If not, we look at all the bits.  (In reality, we only
	 * could get an exception if FPSCR_FEX changed state.  So we should
	 * have at least one bit that corresponds).
	 */
	ofpscr = (uint32_t)fpscr64;
	/* Align the enable bits with their sticky bits (see MASKSHIFT). */
	ofpscr &= ofpscr << (STICKYSHIFT - MASKSHIFT);
	fpscr = ((uint32_t *)&pcb->pcb_fpu.fpscr)[_QUAD_LOWWORD];
	if (fpscr & ofpscr & STICKYBITS)
		fpscr &= ofpscr;

	/*
	 * Let's determine what the appropriate code is.
	 */
	if (fpscr & FPSCR_VX)		code = FPE_FLTINV;
	else if (fpscr & FPSCR_OX)	code = FPE_FLTOVF;
	else if (fpscr & FPSCR_UX)	code = FPE_FLTUND;
	else if (fpscr & FPSCR_ZX)	code = FPE_FLTDIV;
	else if (fpscr & FPSCR_XX)	code = FPE_FLTRES;
	else				code = 0;
	return code;
}
    214 
    215 bool
    216 fpu_save_to_mcontext(lwp_t *l, mcontext_t *mcp, unsigned int *flagp)
    217 {
    218 	KASSERT(l == curlwp);
    219 
    220 	if (!pcu_valid_p(&fpu_ops, l))
    221 		return false;
    222 
    223 	struct pcb * const pcb = lwp_getpcb(l);
    224 
    225 #ifdef PPC_HAVE_FPU
    226 	/* If we're the FPU owner, dump its context to the PCB first. */
    227 	pcu_save(&fpu_ops, l);
    228 #endif
    229 	(void)memcpy(mcp->__fpregs.__fpu_regs, pcb->pcb_fpu.fpreg,
    230 	    sizeof (mcp->__fpregs.__fpu_regs));
    231 	mcp->__fpregs.__fpu_fpscr =
    232 	    ((int *)&pcb->pcb_fpu.fpscr)[_QUAD_LOWWORD];
    233 	mcp->__fpregs.__fpu_valid = 1;
    234 	*flagp |= _UC_FPU;
    235 	return true;
    236 }
    237 
    238 void
    239 fpu_restore_from_mcontext(lwp_t *l, const mcontext_t *mcp)
    240 {
    241 	if (!mcp->__fpregs.__fpu_valid)
    242 		return;
    243 
    244 	struct pcb * const pcb = lwp_getpcb(l);
    245 
    246 #ifdef PPC_HAVE_FPU
    247 	/* we don't need to save the state, just drop it */
    248 	pcu_discard(&fpu_ops, l, true);
    249 #endif
    250 	(void)memcpy(&pcb->pcb_fpu.fpreg, &mcp->__fpregs.__fpu_regs,
    251 	    sizeof (pcb->pcb_fpu.fpreg));
    252 	((int *)&pcb->pcb_fpu.fpscr)[_QUAD_LOWWORD] = mcp->__fpregs.__fpu_fpscr;
    253 }
    254