/*	$NetBSD: vfp_init.c,v 1.23 2013/08/18 06:28:18 matt Exp $ */

/*
 * Copyright (c) 2008 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/cpu.h>

#include <arm/locore.h>
#include <arm/pcb.h>
#include <arm/undefined.h>
#include <arm/vfpreg.h>
#include <arm/mcontext.h>

#include <uvm/uvm_extern.h>		/* for pmap.h */

extern int cpu_media_and_vfp_features[];
extern int cpu_neon_present;

#ifdef FPU_VFP

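/*
 * The register accessors below use the generic coprocessor-11 ldc/stc
 * encodings rather than the VFP mnemonics, so this file assembles even
 * when the assembler has not been told about a VFP unit.  A sketch of
 * the equivalence (the {32} operand is the transfer length in words):
 *
 *	ldc	p11, c0, [rN], {32}	==  vldmia rN, {d0-d15}
 *	stc	p11, c0, [rN], {32}	==  vstmia rN, {d0-d15}
 *	ldcl/stcl			==  the same, for the d16-d31 bank
 */
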
/* FLDMD <X>, {d0-d15} */
static inline void
load_vfpregs_lo(const uint64_t *p)
{
	/* vldmia rN, {d0-d15} */
	__asm __volatile("ldc\tp11, c0, [%0], {32}" :: "r" (p) : "memory");
}

/* FSTMD <X>, {d0-d15} */
static inline void
save_vfpregs_lo(uint64_t *p)
{
	__asm __volatile("stc\tp11, c0, [%0], {32}" :: "r" (p) : "memory");
}
#ifdef CPU_CORTEX
/* FLDMD <X>, {d16-d31} */
static inline void
load_vfpregs_hi(const uint64_t *p)
{
	__asm __volatile("ldcl\tp11, c0, [%0], {32}" :: "r" (&p[16]) : "memory");
}

/* FSTMD <X>, {d16-d31} */
static inline void
save_vfpregs_hi(uint64_t *p)
{
	__asm __volatile("stcl\tp11, c0, [%0], {32}" :: "r" (&p[16]) : "memory");
}
#endif
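
/*
 * In load_vfpregs/save_vfpregs below, the upper bank (d16-d31) exists
 * only on the Cortex FPUs.  When the kernel is built for both ARM11 and
 * Cortex (CPU_ARM11 && CPU_CORTEX), the FPU type must be checked at run
 * time, so the switch on ci_vfp_id is compiled in; in a Cortex-only
 * kernel the #ifdefs reduce it to an unconditional call.
 */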
static inline void
load_vfpregs(const struct vfpreg *fregs)
{
	load_vfpregs_lo(fregs->vfp_regs);
#ifdef CPU_CORTEX
#ifdef CPU_ARM11
	switch (curcpu()->ci_vfp_id) {
	case FPU_VFP_CORTEXA5:
	case FPU_VFP_CORTEXA7:
	case FPU_VFP_CORTEXA8:
	case FPU_VFP_CORTEXA9:
	case FPU_VFP_CORTEXA15:
#endif
		load_vfpregs_hi(fregs->vfp_regs);
#ifdef CPU_ARM11
		break;
	}
#endif
#endif
}

static inline void
save_vfpregs(struct vfpreg *fregs)
{
	save_vfpregs_lo(fregs->vfp_regs);
#ifdef CPU_CORTEX
#ifdef CPU_ARM11
	switch (curcpu()->ci_vfp_id) {
	case FPU_VFP_CORTEXA5:
	case FPU_VFP_CORTEXA7:
	case FPU_VFP_CORTEXA8:
	case FPU_VFP_CORTEXA9:
	case FPU_VFP_CORTEXA15:
#endif
		save_vfpregs_hi(fregs->vfp_regs);
#ifdef CPU_ARM11
		break;
	}
#endif
#endif
}

/* The real handler for VFP bounces.  */
static int vfp_handler(u_int, u_int, trapframe_t *, int);
#ifdef CPU_CORTEX
static int neon_handler(u_int, u_int, trapframe_t *, int);
#endif

static void vfp_state_load(lwp_t *, u_int);
static void vfp_state_save(lwp_t *, u_int);
static void vfp_state_release(lwp_t *, u_int);

const pcu_ops_t arm_vfp_ops = {
	.pcu_id = PCU_FPU,
	.pcu_state_save = vfp_state_save,
	.pcu_state_load = vfp_state_load,
	.pcu_state_release = vfp_state_release,
};
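
/*
 * The pcu(9) framework drives all VFP context handling: pcu_load()
 * makes the current lwp's state resident on this CPU (vfp_state_load),
 * pcu_save() writes it back to the PCB (vfp_state_save), and releasing
 * the unit (vfp_state_release) disables the VFP so the next user
 * access traps back into vfp_handler.
 */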

struct evcnt vfpevent_use;
struct evcnt vfpevent_reuse;
struct evcnt vfpevent_fpe;

/*
 * Used to test for a VFP.  The following function is installed as a
 * coproc10 handler on the undefined instruction vector, and then we
 * issue a VFP instruction.  If undefined_test is non-zero afterwards,
 * the VFP did not handle the instruction, so it must be absent or
 * disabled.
 */

static int undefined_test;

static int
vfp_test(u_int address, u_int insn, trapframe_t *frame, int fault_code)
{

	frame->tf_pc += INSN_SIZE;
	++undefined_test;
	return 0;
}

#endif /* FPU_VFP */

struct evcnt vfp_fpscr_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "VFP", "FPSCR traps");
EVCNT_ATTACH_STATIC(vfp_fpscr_ev);

static int
vfp_fpscr_handler(u_int address, u_int insn, trapframe_t *frame, int fault_code)
{
	struct lwp * const l = curlwp;
	const u_int regno = (insn >> 12) & 0xf;
	/*
	 * Only match moves to/from the FPSCR register; the SP, LR, and
	 * PC may not be used as the transfer register.
	 */
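	/*
	 * VMSR FPSCR, Rt encodes as 0xeee10a10 and VMRS Rt, FPSCR as
	 * 0xeef10a10; masking with 0xffef0fff ignores the Rt field
	 * (bits 12-15) and the direction bit (bit 20), so one compare
	 * matches both.
	 */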
	if ((insn & 0xffef0fff) != 0xeee10a10 || regno > 12)
		return 1;

	struct pcb * const pcb = lwp_getpcb(l);

#ifdef FPU_VFP
	/*
	 * If the VFP is enabled, our state is live in the hardware and
	 * the pcb has a stale copy, so the only safe thing to do is to
	 * bounce the trap and let the instruction be retried with the
	 * VFP re-enabled.
	 */
	if (pcb->pcb_vfp.vfp_fpexc & VFP_FPEXC_EN)
		return 1;
#endif

	if (__predict_false((l->l_md.md_flags & MDLWP_VFPUSED) == 0)) {
		l->l_md.md_flags |= MDLWP_VFPUSED;
		pcb->pcb_vfp.vfp_fpscr =
		    (VFP_FPSCR_DN | VFP_FPSCR_FZ);	/* Runfast */
	}
	/*
	 * We now know the pcb has the saved copy.
	 */
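	/*
	 * Bit 20 set means VMRS (read FPSCR), clear means VMSR (write).
	 * On a write, mask out the exception-enable bits (VFP_FPSCR_ESUM)
	 * when MVFR0 says the hardware does not support trapped
	 * exceptions, since they could never be honoured.
	 */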
	register_t * const regp = &frame->tf_r0 + regno;
	if (insn & 0x00100000) {
		*regp = pcb->pcb_vfp.vfp_fpscr;
	} else {
		register_t tmp = *regp;
		if (!(cpu_media_and_vfp_features[0] & ARM_MVFR0_EXCEPT_MASK))
			tmp &= ~VFP_FPSCR_ESUM;
		pcb->pcb_vfp.vfp_fpscr = tmp;
	}

	vfp_fpscr_ev.ev_count++;

	frame->tf_pc += INSN_SIZE;
	return 0;
}

#ifndef FPU_VFP
/*
 * Even if we don't want VFP support, we still need to emulate the
 * FPSCR instructions.
 */
void
vfp_attach(void)
{
	install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
}

#else
#if 0
static bool
vfp_patch_branch(uintptr_t code, uintptr_t func, uintptr_t newfunc)
{
	for (;; code += sizeof(uint32_t)) {
		uint32_t insn = *(uint32_t *)code;
		if ((insn & 0xffd08000) == 0xe8908000)	/* ldm ... { pc } */
			return false;
		if ((insn & 0xfffffff0) == 0xe12fff10)	/* bx rN */
			return false;
		if ((insn & 0xf1a0f000) == 0xe1a0f000)	/* mov pc, ... */
			return false;
		if ((insn >> 25) != 0x75)		/* not b/bl insn */
			continue;
		intptr_t imm26 = ((int32_t)insn << 8) >> 6;
		if (code + imm26 + 8 == func) {
			int32_t imm24 = (newfunc - (code + 8)) >> 2;
			uint32_t new_insn = (insn & 0xff000000)
			   | (imm24 & 0xffffff);
			KASSERTMSG((uint32_t)((imm24 >> 24) + 1) <= 1, "%x",
			    ((imm24 >> 24) + 1));
			*(uint32_t *)code = new_insn;
			cpu_idcache_wbinv_range(code, sizeof(uint32_t));
			return true;
		}
	}
}
#endif

void
vfp_attach(void)
{
	struct cpu_info * const ci = curcpu();
	const char *model = NULL;
	bool vfp_p = false;

	if (CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)
	    || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)) {
		const uint32_t cpacr_vfp = CPACR_CPn(VFP_COPROC);
		const uint32_t cpacr_vfp2 = CPACR_CPn(VFP_COPROC2);

		/*
		 * We first need to enable access to the coprocessors.
		 */
		uint32_t cpacr = armreg_cpacr_read();
		cpacr |= __SHIFTIN(CPACR_ALL, cpacr_vfp);
		cpacr |= __SHIFTIN(CPACR_ALL, cpacr_vfp2);
#if 0
		if (CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)) {
			/*
			 * Disable access to the upper 16 FP registers and NEON.
			 */
			cpacr |= CPACR_V7_D32DIS;
			cpacr |= CPACR_V7_ASEDIS;
		}
#endif
		armreg_cpacr_write(cpacr);

		/*
		 * If we could enable them, then they exist.
		 */
		cpacr = armreg_cpacr_read();
		vfp_p = __SHIFTOUT(cpacr, cpacr_vfp2) != CPACR_NOACCESS
		    || __SHIFTOUT(cpacr, cpacr_vfp) != CPACR_NOACCESS;
	}

	void *uh = install_coproc_handler(VFP_COPROC, vfp_test);

	undefined_test = 0;

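	/*
	 * Reading FPSID is itself a cp10 instruction: if there is no
	 * (enabled) VFP, it bounces to vfp_test, which bumps
	 * undefined_test and skips over the instruction.
	 */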
	const uint32_t fpsid = armreg_fpsid_read();

	remove_coproc_handler(uh);

	if (undefined_test != 0) {
		aprint_normal_dev(ci->ci_dev, "No VFP detected\n");
		install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
		ci->ci_vfp_id = 0;
		return;
	}

	ci->ci_vfp_id = fpsid;
	switch (fpsid & ~VFP_FPSID_REV_MSK) {
	case FPU_VFP10_ARM10E:
		model = "VFP10 R1";
		break;
	case FPU_VFP11_ARM11:
		model = "VFP11";
		break;
	case FPU_VFP_CORTEXA5:
	case FPU_VFP_CORTEXA7:
	case FPU_VFP_CORTEXA8:
	case FPU_VFP_CORTEXA9:
	case FPU_VFP_CORTEXA15:
		model = "NEON MPE (VFP 3.0+)";
		cpu_neon_present = 1;
		break;
	default:
		aprint_normal_dev(ci->ci_dev, "unrecognized VFP version %x\n",
		    fpsid);
		install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
		return;
	}

	cpu_fpu_present = 1;
	cpu_media_and_vfp_features[0] = armreg_mvfr0_read();
	cpu_media_and_vfp_features[1] = armreg_mvfr1_read();
	if (fpsid != 0) {
		aprint_normal("vfp%d at %s: %s\n",
		    device_unit(curcpu()->ci_dev),
		    device_xname(curcpu()->ci_dev),
		    model);
		aprint_verbose("vfp%d: mvfr: [0]=%#x [1]=%#x\n",
		    device_unit(curcpu()->ci_dev),
		    cpu_media_and_vfp_features[0],
		    cpu_media_and_vfp_features[1]);
	}
	evcnt_attach_dynamic(&vfpevent_use, EVCNT_TYPE_MISC, NULL,
	    "VFP", "coproc use");
	evcnt_attach_dynamic(&vfpevent_reuse, EVCNT_TYPE_MISC, NULL,
	    "VFP", "coproc re-use");
	evcnt_attach_dynamic(&vfpevent_fpe, EVCNT_TYPE_TRAP, NULL,
	    "VFP", "coproc fault");
	install_coproc_handler(VFP_COPROC, vfp_handler);
	install_coproc_handler(VFP_COPROC2, vfp_handler);
#ifdef CPU_CORTEX
	install_coproc_handler(CORE_UNKNOWN_HANDLER, neon_handler);
#endif

#if 0
	vfp_patch_branch((uintptr_t)pmap_copy_page_generic,
	   (uintptr_t)bcopy_page, (uintptr_t)bcopy_page_vfp);
	vfp_patch_branch((uintptr_t)pmap_zero_page_generic,
	   (uintptr_t)bzero_page, (uintptr_t)bzero_page_vfp);
#endif
}

/* The real handler for VFP bounces.  */
static int
vfp_handler(u_int address, u_int insn, trapframe_t *frame, int fault_code)
{
	struct cpu_info * const ci = curcpu();

	/* This shouldn't ever happen.  */
	if (fault_code != FAULT_USER)
		panic("VFP fault at %#x in non-user mode", frame->tf_pc);

	if (ci->ci_vfp_id == 0)
		/* No VFP detected, just fault.  */
		return 1;

	uint32_t fpexc = armreg_fpexc_read();
	if (fpexc & VFP_FPEXC_EX) {
		ksiginfo_t ksi;
		KASSERT(fpexc & VFP_FPEXC_EN);

		vfpevent_fpe.ev_count++;

		pcu_save(&arm_vfp_ops);

		/*
		 * Need to clear the exception condition so that any
		 * signal handler can run.
		 */
		armreg_fpexc_write(fpexc & ~(VFP_FPEXC_EX|VFP_FPEXC_FSUM));

		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGFPE;
		if (fpexc & VFP_FPEXC_IXF)
			ksi.ksi_code = FPE_FLTRES;
		else if (fpexc & VFP_FPEXC_UFF)
			ksi.ksi_code = FPE_FLTUND;
		else if (fpexc & VFP_FPEXC_OFF)
			ksi.ksi_code = FPE_FLTOVF;
		else if (fpexc & VFP_FPEXC_DZF)
			ksi.ksi_code = FPE_FLTDIV;
		else if (fpexc & VFP_FPEXC_IOF)
			ksi.ksi_code = FPE_FLTINV;
		ksi.ksi_addr = (uint32_t *)address;
		ksi.ksi_trap = 0;
		trapsignal(curlwp, &ksi);
		return 0;
	}

	/*
	 * If we are just changing/fetching FPSCR, don't bother loading it.
	 */
	if (!vfp_fpscr_handler(address, insn, frame, fault_code))
		return 0;

	pcu_load(&arm_vfp_ops);

	/* Need to restart the faulted instruction.  */
//	frame->tf_pc -= INSN_SIZE;
	return 0;
}

#ifdef CPU_CORTEX
/* The real handler for NEON bounces.  */
static int
neon_handler(u_int address, u_int insn, trapframe_t *frame, int fault_code)
{
	struct cpu_info * const ci = curcpu();

	if (ci->ci_vfp_id == 0)
		/* No VFP detected, just fault.  */
		return 1;

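	/*
	 * Advanced SIMD instructions live in the unconditional (0xf...)
	 * space: the masks select the 0xf2/0xf3 data-processing
	 * encodings and the 0xf4 element/structure load/store group.
	 */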
	if ((insn & 0xfe000000) != 0xf2000000
	    && (insn & 0xfe000000) != 0xf4000000)
		/* Not a NEON instruction, just fault.  */
		return 1;

	/* This shouldn't ever happen.  */
	if (fault_code != FAULT_USER)
		panic("NEON fault in non-user mode");

	pcu_load(&arm_vfp_ops);

	/* Need to restart the faulted instruction.  */
//	frame->tf_pc -= INSN_SIZE;
	return 0;
}
#endif

static void
vfp_state_load(lwp_t *l, u_int flags)
{
	struct pcb * const pcb = lwp_getpcb(l);

	KASSERT(flags & PCU_ENABLE);

	if (flags & PCU_KERNEL) {
		if ((flags & PCU_LOADED) == 0) {
			pcb->pcb_kernel_vfp.vfp_fpexc = pcb->pcb_vfp.vfp_fpexc;
		}
		pcb->pcb_vfp.vfp_fpexc = VFP_FPEXC_EN;
		armreg_fpexc_write(pcb->pcb_vfp.vfp_fpexc);
		/*
		 * Load the kernel registers (just the first 16) if
		 * they've been used.
		 */
		if (flags & PCU_LOADED) {
			load_vfpregs_lo(pcb->pcb_kernel_vfp.vfp_regs);
		}
		return;
	}
	struct vfpreg * const fregs = &pcb->pcb_vfp;

	/*
	 * Instrument VFP usage -- if a process has not previously
	 * used the VFP, mark it as having used VFP for the first time,
	 * and count this event.
	 *
	 * If a process has used the VFP, count a "used VFP, and took
	 * a trap to use it again" event.
	 */
	if (__predict_false((l->l_md.md_flags & MDLWP_VFPUSED) == 0)) {
		vfpevent_use.ev_count++;
		l->l_md.md_flags |= MDLWP_VFPUSED;
		pcb->pcb_vfp.vfp_fpscr =	/* Runfast */
		    (VFP_FPSCR_DN | VFP_FPSCR_FZ | VFP_FPSCR_RN);
	} else {
		vfpevent_reuse.ev_count++;
	}

	if (fregs->vfp_fpexc & VFP_FPEXC_EN) {
		/*
		 * If we think the VFP is enabled, it must have been
		 * disabled by vfp_state_release for another LWP, so we
		 * can just restore FPEXC and return since our VFP state
		 * is still loaded.
		 */
		armreg_fpexc_write(fregs->vfp_fpexc);
		return;
	}

	/* Load and enable the VFP (so that we can write the registers).  */
	if (flags & PCU_RELOAD) {
		uint32_t fpexc = armreg_fpexc_read();
		KDASSERT((fpexc & VFP_FPEXC_EX) == 0);
		armreg_fpexc_write(fpexc | VFP_FPEXC_EN);

		load_vfpregs(fregs);
		armreg_fpscr_write(fregs->vfp_fpscr);

		if (fregs->vfp_fpexc & VFP_FPEXC_EX) {
			/* Need to restore the exception handling state.  */
			armreg_fpinst_write(fregs->vfp_fpinst);
			if (fregs->vfp_fpexc & VFP_FPEXC_FP2V)
				armreg_fpinst2_write(fregs->vfp_fpinst2);
		}
	}

	/* Finally, restore FPEXC with the enable bit set. */
	fregs->vfp_fpexc |= VFP_FPEXC_EN;
	armreg_fpexc_write(fregs->vfp_fpexc);
}

void
vfp_state_save(lwp_t *l, u_int flags)
{
	struct pcb * const pcb = lwp_getpcb(l);
	uint32_t fpexc = armreg_fpexc_read();
	armreg_fpexc_write((fpexc | VFP_FPEXC_EN) & ~VFP_FPEXC_EX);

	if (flags & PCU_KERNEL) {
		/*
		 * Save the kernel set of VFP registers
		 * (just the first 16).
		 */
		save_vfpregs_lo(pcb->pcb_kernel_vfp.vfp_regs);
		return;
	}

	struct vfpreg * const fregs = &pcb->pcb_vfp;

	/*
	 * Enable the VFP (so we can read the registers).
	 * Make sure the exception bit is cleared so that we can
	 * safely dump the registers.
	 */
	fregs->vfp_fpexc = fpexc;
	if (fpexc & VFP_FPEXC_EX) {
		/* Need to save the exception handling state */
		fregs->vfp_fpinst = armreg_fpinst_read();
		if (fpexc & VFP_FPEXC_FP2V)
			fregs->vfp_fpinst2 = armreg_fpinst2_read();
	}
	fregs->vfp_fpscr = armreg_fpscr_read();
	save_vfpregs(fregs);

	/* Disable the VFP.  */
	armreg_fpexc_write(fpexc);

void
vfp_state_release(lwp_t *l, u_int flags)
{
	struct pcb * const pcb = lwp_getpcb(l);

	if (flags & PCU_KERNEL) {
		/*
		 * Restore the FPEXC since we borrowed that field.
		 */
		pcb->pcb_vfp.vfp_fpexc = pcb->pcb_kernel_vfp.vfp_fpexc;
	} else {
		/*
		 * Now mark the VFP as disabled (our state has already
		 * been saved or is being discarded).
		 */
		pcb->pcb_vfp.vfp_fpexc &= ~VFP_FPEXC_EN;
	}

	/*
	 * Turn off the FPU so the next time a VFP instruction is issued
	 * an exception happens.  We don't know if this LWP's state was
	 * loaded, but if we turned off the FPU for some other LWP, when
	 * pcu_load invokes vfp_state_load it will see that VFP_FPEXC_EN
	 * is still set, so it will just restore FPEXC and return since
	 * its contents are still sitting in the VFP.
	 */
	armreg_fpexc_write(armreg_fpexc_read() & ~VFP_FPEXC_EN);
}

void
vfp_savecontext(void)
{
	pcu_save(&arm_vfp_ops);
}

void
vfp_discardcontext(void)
{
	pcu_discard(&arm_vfp_ops);
}

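/*
 * Allow kernel code to use the VFP from interrupt context: d0-d15 of
 * the interrupted lwp (if any) are stashed in the upper half of its
 * pcb_kernel_vfp save area so the handler gets a free lower bank;
 * vfp_kernel_release() restores them.  Outside interrupt context the
 * generic pcu(9) kernel acquire/release path is used instead.
 */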
void
vfp_kernel_acquire(void)
{
	if (__predict_false(cpu_intr_p())) {
		armreg_fpexc_write(VFP_FPEXC_EN);
		if (curcpu()->ci_data.cpu_pcu_curlwp[PCU_FPU] != NULL) {
			lwp_t * const l = curlwp;
			struct pcb * const pcb = lwp_getpcb(l);
			KASSERT((l->l_md.md_flags & MDLWP_VFPINTR) == 0);
			l->l_md.md_flags |= MDLWP_VFPINTR;
			save_vfpregs_lo(&pcb->pcb_kernel_vfp.vfp_regs[16]);
		}
	} else {
		pcu_kernel_acquire(&arm_vfp_ops);
	}
}

void
vfp_kernel_release(void)
{
	if (__predict_false(cpu_intr_p())) {
		uint32_t fpexc = 0;
		if (curcpu()->ci_data.cpu_pcu_curlwp[PCU_FPU] != NULL) {
			lwp_t * const l = curlwp;
			struct pcb * const pcb = lwp_getpcb(l);
			KASSERT(l->l_md.md_flags & MDLWP_VFPINTR);
			load_vfpregs_lo(&pcb->pcb_kernel_vfp.vfp_regs[16]);
			l->l_md.md_flags &= ~MDLWP_VFPINTR;
			fpexc = pcb->pcb_vfp.vfp_fpexc;
		}
		armreg_fpexc_write(fpexc);
	} else {
		pcu_kernel_release(&arm_vfp_ops);
	}
}
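
/*
 * A minimal usage sketch (hypothetical caller); kernel code that wants
 * to use d0-d15 must bracket the use and should not sleep in between:
 *
 *	vfp_kernel_acquire();
 *	... use d0-d15 ...
 *	vfp_kernel_release();
 */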

void
vfp_getcontext(struct lwp *l, mcontext_t *mcp, int *flagsp)
{
	if (l->l_md.md_flags & MDLWP_VFPUSED) {
		const struct pcb * const pcb = lwp_getpcb(l);
		pcu_save(&arm_vfp_ops);
		mcp->__fpu.__vfpregs.__vfp_fpscr = pcb->pcb_vfp.vfp_fpscr;
		memcpy(mcp->__fpu.__vfpregs.__vfp_fstmx, pcb->pcb_vfp.vfp_regs,
		    sizeof(mcp->__fpu.__vfpregs.__vfp_fstmx));
		*flagsp |= _UC_FPU|_UC_ARM_VFP;
	}
}

void
vfp_setcontext(struct lwp *l, const mcontext_t *mcp)
{
	pcu_discard(&arm_vfp_ops);
	struct pcb * const pcb = lwp_getpcb(l);
	l->l_md.md_flags |= MDLWP_VFPUSED;
	pcb->pcb_vfp.vfp_fpscr = mcp->__fpu.__vfpregs.__vfp_fpscr;
	memcpy(pcb->pcb_vfp.vfp_regs, mcp->__fpu.__vfpregs.__vfp_fstmx,
	    sizeof(mcp->__fpu.__vfpregs.__vfp_fstmx));
}

#endif /* FPU_VFP */