/*	$NetBSD: vfp_init.c,v 1.15 2012/12/31 03:23:53 matt Exp $	*/
2
3 /*
4 * Copyright (c) 2008 ARM Ltd
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the company may not be used to endorse or promote
16 * products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
25 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/param.h>
33 #include <sys/types.h>
34 #include <sys/systm.h>
35 #include <sys/device.h>
36 #include <sys/proc.h>
37 #include <sys/cpu.h>
38
39 #include <arm/pcb.h>
40 #include <arm/undefined.h>
41 #include <arm/vfpreg.h>
42 #include <arm/mcontext.h>
43
44 #include <uvm/uvm_extern.h> /* for pmap.h */
45
46 /*
47 * Use generic co-processor instructions to avoid assembly problems.
48 */
49
/* FMRX <X>, fpsid — read the VFP System ID register via a generic mrc. */
static inline uint32_t
read_fpsid(void)
{
	uint32_t fpsid;

	__asm __volatile("mrc p10, 7, %0, c0, c0, 0" : "=r" (fpsid));
	return fpsid;
}
58
/* FMRX <X>, fpscr — read the VFP Status and Control register (c1). */
static inline uint32_t
read_fpscr(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c1, c0, 0" : "=r" (rv));
	return rv;
}
67
/* FMRX <X>, fpexc — read the VFP Exception register (c8). */
static inline uint32_t
read_fpexc(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c8, c0, 0" : "=r" (rv));
	return rv;
}
76
/* FMRX <X>, fpinst — read the bounced-instruction register (c9). */
static inline uint32_t
read_fpinst(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c9, c0, 0" : "=r" (rv));
	return rv;
}
85
/* FMRX <X>, fpinst2 — read the second bounced-instruction register (c10). */
static inline uint32_t
read_fpinst2(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c10, c0, 0" : "=r" (rv));
	return rv;
}
94
/* FMXR fpscr, <X> — write the VFP Status and Control register (c1). */
#define write_fpscr(X) __asm __volatile("mcr p10, 7, %0, c1, c0, 0" : \
    : "r" (X))
/* FMXR fpexc, <X> — write the VFP Exception register (c8). */
#define write_fpexc(X) __asm __volatile("mcr p10, 7, %0, c8, c0, 0" : \
    : "r" (X))
/* FMXR fpinst, <X> — write the bounced-instruction register (c9). */
#define write_fpinst(X) __asm __volatile("mcr p10, 7, %0, c9, c0, 0" : \
    : "r" (X))
/* FMXR fpinst2, <X> — write the second bounced-instruction register (c10). */
#define write_fpinst2(X) __asm __volatile("mcr p10, 7, %0, c10, c0, 0" : \
    : "r" (X))
107
108 #ifdef FPU_VFP
109
/*
 * FLDMD: load d0-d15 from memory at *regs (encoded as
 * "vldmia rN, {d0-d15}" via a generic ldc).
 */
static inline void
load_vfpregs_lo(const uint64_t *regs)
{
	__asm __volatile("ldc\tp11, c0, [%0], {32}" :: "r" (regs) : "memory");
}
117
/* FSTMD: store d0-d15 to memory at *regs via a generic stc. */
static inline void
save_vfpregs_lo(uint64_t *regs)
{
	__asm __volatile("stc\tp11, c0, [%0], {32}" :: "r" (regs) : "memory");
}
124
#ifdef CPU_CORTEX
/* FLDMD <X>, {d16-d31} — load the upper register bank from &p[16]. */
static inline void
load_vfpregs_hi(const uint64_t *p)
{
	__asm __volatile("ldcl\tp11, c0, [%0], {32}" :: "r" (&p[16]) : "memory");
}

/* FSTMD <X>, {d16-d31} — store the upper register bank to &p[16]. */
static inline void
save_vfpregs_hi(uint64_t *p)
{
	__asm __volatile("stcl\tp11, c0, [%0], {32}" :: "r" (&p[16]) : "memory");
}
#endif
140
/*
 * Load the full VFP register file from *fregs.  The lower bank (d0-d15)
 * is always loaded.  With CPU_CORTEX alone, the upper bank (d16-d31) is
 * loaded unconditionally; when CPU_ARM11 is also configured, the switch
 * restricts the upper-bank load to the Cortex-A FPU models, since the
 * VFP11 has only 16 double registers.
 */
static inline void
load_vfpregs(const struct vfpreg *fregs)
{
	load_vfpregs_lo(fregs->vfp_regs);
#ifdef CPU_CORTEX
#ifdef CPU_ARM11
	switch (curcpu()->ci_vfp_id) {
	case FPU_VFP_CORTEXA5:
	case FPU_VFP_CORTEXA7:
	case FPU_VFP_CORTEXA8:
	case FPU_VFP_CORTEXA9:
#endif
		load_vfpregs_hi(fregs->vfp_regs);
#ifdef CPU_ARM11
		break;
	}
#endif
#endif
}
160
/*
 * Save the full VFP register file to *fregs.  Mirrors load_vfpregs():
 * the lower bank is always saved; the upper bank only on FPUs that
 * have one (see the comment on load_vfpregs for the #ifdef logic).
 */
static inline void
save_vfpregs(struct vfpreg *fregs)
{
	save_vfpregs_lo(fregs->vfp_regs);
#ifdef CPU_CORTEX
#ifdef CPU_ARM11
	switch (curcpu()->ci_vfp_id) {
	case FPU_VFP_CORTEXA5:
	case FPU_VFP_CORTEXA7:
	case FPU_VFP_CORTEXA8:
	case FPU_VFP_CORTEXA9:
#endif
		save_vfpregs_hi(fregs->vfp_regs);
#ifdef CPU_ARM11
		break;
	}
#endif
#endif
}
180
/* The real handler for VFP bounces. */
static int vfp_handler(u_int, u_int, trapframe_t *, int);
#ifdef CPU_CORTEX
/* Handler for NEON (Advanced SIMD) bounces. */
static int neon_handler(u_int, u_int, trapframe_t *, int);
#endif

/* PCU callbacks implementing lazy VFP context switching. */
static void vfp_state_load(lwp_t *, u_int);
static void vfp_state_save(lwp_t *, u_int);
static void vfp_state_release(lwp_t *, u_int);

const pcu_ops_t arm_vfp_ops = {
	.pcu_id = PCU_FPU,
	.pcu_state_save = vfp_state_save,
	.pcu_state_load = vfp_state_load,
	.pcu_state_release = vfp_state_release,
};

/* First-use / re-use event counters, attached in vfp_attach(). */
struct evcnt vfpevent_use;
struct evcnt vfpevent_reuse;
200
201 /*
202 * Used to test for a VFP. The following function is installed as a coproc10
203 * handler on the undefined instruction vector and then we issue a VFP
204 * instruction. If undefined_test is non zero then the VFP did not handle
205 * the instruction so must be absent, or disabled.
206 */
207
208 static int undefined_test;
209
210 static int
211 vfp_test(u_int address, u_int insn, trapframe_t *frame, int fault_code)
212 {
213
214 frame->tf_pc += INSN_SIZE;
215 ++undefined_test;
216 return 0;
217 }
218
219 #endif /* FPU_VFP */
220
/* Counts traps taken to emulate FPSCR access (see vfp_fpscr_handler). */
struct evcnt vfp_fpscr_ev =
	EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "VFP", "FPSCR traps");
EVCNT_ATTACH_STATIC(vfp_fpscr_ev);
224
/*
 * Emulate VMRS/VMSR access to the FPSCR using the copy saved in the
 * lwp's pcb, without turning the VFP on.  Returns 0 if the instruction
 * was emulated (pc advanced past it), 1 to decline and let the fault
 * proceed.
 */
static int
vfp_fpscr_handler(u_int address, u_int insn, trapframe_t *frame, int fault_code)
{
	struct lwp * const l = curlwp;
	const u_int regno = (insn >> 12) & 0xf;
	/*
	 * Only match move to/from the FPSCR register and we
	 * can't be using the SP,LR,PC as a source.
	 */
	if ((insn & 0xffef0fff) != 0xeee10a10 || regno > 12)
		return 1;

	struct pcb * const pcb = lwp_getpcb(l);

#ifdef FPU_VFP
	/*
	 * If FPU is valid somewhere, let's just reenable VFP and
	 * retry the instruction (only safe thing to do since the
	 * pcb has a stale copy).
	 */
	if (pcb->pcb_vfp.vfp_fpexc & VFP_FPEXC_EN)
		return 1;
#endif

	/* First VFP use by this lwp: give it the default "RunFast" FPSCR. */
	if (__predict_false((l->l_md.md_flags & MDLWP_VFPUSED) == 0)) {
		l->l_md.md_flags |= MDLWP_VFPUSED;
		pcb->pcb_vfp.vfp_fpscr =
		    (VFP_FPSCR_DN | VFP_FPSCR_FZ);	/* Runfast */
	}

	/*
	 * We now know the pcb has the saved copy.
	 */
	register_t * const regp = &frame->tf_r0 + regno;
	if (insn & 0x00100000) {
		/* VMRS: FPSCR -> core register */
		*regp = pcb->pcb_vfp.vfp_fpscr;
	} else {
		/* VMSR: core register -> FPSCR */
		pcb->pcb_vfp.vfp_fpscr = *regp;
	}

	vfp_fpscr_ev.ev_count++;

	frame->tf_pc += INSN_SIZE;
	return 0;
}
270
271 #ifndef FPU_VFP
272 /*
273 * If we don't want VFP support, we still need to handle emulating VFP FPSCR
274 * instructions.
275 */
276 void
277 vfp_attach(void)
278 {
279 install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
280 }
281
282 #else
/*
 * Scan forward from `code` for an unconditional b/bl whose target is
 * `func`, and rewrite its 24-bit immediate so it branches to `newfunc`
 * instead.  Gives up (returns false) at the first instruction that
 * leaves the function, so the scan never runs off the end.  Returns
 * true once a branch has been patched.
 */
static bool
vfp_patch_branch(uintptr_t code, uintptr_t func, uintptr_t newfunc)
{
	for (;; code += sizeof(uint32_t)) {
		uint32_t insn = *(uint32_t *)code;
		if ((insn & 0xffd08000) == 0xe8908000)	/* ldm ... { pc } */
			return false;
		if ((insn & 0xfffffff0) == 0xe12fff10)	/* bx rN */
			return false;
		if ((insn & 0xf1a0f000) == 0xe1a0f000)	/* mov pc, ... */
			return false;
		if ((insn >> 25) != 0x75)		/* not b/bl insn */
			continue;
		/* Sign-extend the 24-bit immediate and scale by 4. */
		intptr_t imm26 = ((int32_t)insn << 8) >> 6;
		/* Branch targets are relative to pc, i.e. code + 8. */
		if (code + imm26 + 8 == func) {
			int32_t imm24 = (newfunc - (code + 8)) >> 2;
			uint32_t new_insn = (insn & 0xff000000)
			    | (imm24 & 0xffffff);
			/* New offset must fit in the signed 24-bit field. */
			KASSERTMSG((uint32_t)((imm24 >> 24) + 1) <= 1, "%x",
			    ((imm24 >> 24) + 1));
			*(uint32_t *)code = new_insn;
			/* Keep I-cache coherent with the patched word. */
			cpu_idcache_wbinv_range(code, sizeof(uint32_t));
			return true;
		}
	}
}
309
310 void
311 vfp_attach(void)
312 {
313 struct cpu_info * const ci = curcpu();
314 const char *model = NULL;
315 bool vfp_p = false;
316
317 if (CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)
318 || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)) {
319 const uint32_t cpacr_vfp = CPACR_CPn(VFP_COPROC);
320 const uint32_t cpacr_vfp2 = CPACR_CPn(VFP_COPROC2);
321
322 /*
323 * We first need to enable access to the coprocessors.
324 */
325 uint32_t cpacr = armreg_cpacr_read();
326 cpacr |= __SHIFTIN(CPACR_ALL, cpacr_vfp);
327 cpacr |= __SHIFTIN(CPACR_ALL, cpacr_vfp2);
328 #if 0
329 if (CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)) {
330 /*
331 * Disable access to the upper 16 FP registers and NEON.
332 */
333 cpacr |= CPACR_V7_D32DIS;
334 cpacr |= CPACR_V7_ASEDIS;
335 }
336 #endif
337 armreg_cpacr_write(cpacr);
338
339 /*
340 * If we could enable them, then they exist.
341 */
342 cpacr = armreg_cpacr_read();
343 vfp_p = __SHIFTOUT(cpacr, cpacr_vfp2) != CPACR_NOACCESS
344 || __SHIFTOUT(cpacr, cpacr_vfp) != CPACR_NOACCESS;
345 }
346
347 void *uh = install_coproc_handler(VFP_COPROC, vfp_test);
348
349 undefined_test = 0;
350
351 const uint32_t fpsid = read_fpsid();
352
353 remove_coproc_handler(uh);
354
355 if (undefined_test != 0) {
356 aprint_normal_dev(ci->ci_dev, "No VFP detected\n");
357 install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
358 ci->ci_vfp_id = 0;
359 return;
360 }
361
362 ci->ci_vfp_id = fpsid;
363 switch (fpsid & ~ VFP_FPSID_REV_MSK) {
364 case FPU_VFP10_ARM10E:
365 model = "VFP10 R1";
366 break;
367 case FPU_VFP11_ARM11:
368 model = "VFP11";
369 break;
370 case FPU_VFP_CORTEXA5:
371 case FPU_VFP_CORTEXA7:
372 case FPU_VFP_CORTEXA8:
373 case FPU_VFP_CORTEXA9:
374 model = "NEON MPE (VFP 3.0+)";
375 break;
376 default:
377 aprint_normal_dev(ci->ci_dev, "unrecognized VFP version %x\n",
378 fpsid);
379 install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
380 return;
381 }
382
383 if (fpsid != 0) {
384 aprint_normal("vfp%d at %s: %s\n",
385 device_unit(curcpu()->ci_dev), device_xname(curcpu()->ci_dev),
386 model);
387 }
388 evcnt_attach_dynamic(&vfpevent_use, EVCNT_TYPE_MISC, NULL,
389 "VFP", "coproc use");
390 evcnt_attach_dynamic(&vfpevent_reuse, EVCNT_TYPE_MISC, NULL,
391 "VFP", "coproc re-use");
392 install_coproc_handler(VFP_COPROC, vfp_handler);
393 install_coproc_handler(VFP_COPROC2, vfp_handler);
394 #ifdef CPU_CORTEX
395 install_coproc_handler(CORE_UNKNOWN_HANDLER, neon_handler);
396 #endif
397
398 vfp_patch_branch((uintptr_t)pmap_copy_page_generic,
399 (uintptr_t)bcopy_page, (uintptr_t)bcopy_page_vfp);
400 vfp_patch_branch((uintptr_t)pmap_zero_page_generic,
401 (uintptr_t)bzero_page, (uintptr_t)bzero_page_vfp);
402 }
403
/*
 * The real handler for VFP bounces.  A user-mode VFP instruction trapped
 * because the unit is disabled: either emulate an FPSCR access directly,
 * or load this lwp's VFP state via the PCU and retry the instruction.
 * Returns 0 on success, 1 to let the fault proceed.
 */
static int
vfp_handler(u_int address, u_int insn, trapframe_t *frame,
    int fault_code)
{
	struct cpu_info * const ci = curcpu();

	/* This shouldn't ever happen. */
	if (fault_code != FAULT_USER)
		panic("VFP fault at %#x in non-user mode", frame->tf_pc);

	if (ci->ci_vfp_id == 0)
		/* No VFP detected, just fault. */
		return 1;

	/*
	 * If we are just changing/fetching FPSCR, don't bother loading it.
	 */
	if (!vfp_fpscr_handler(address, insn, frame, fault_code))
		return 0;

	/* Load (or reactivate) this lwp's VFP state on this CPU. */
	pcu_load(&arm_vfp_ops);

	/* Need to restart the faulted instruction. */
//	frame->tf_pc -= INSN_SIZE;
	return 0;
}
431
#ifdef CPU_CORTEX
/*
 * The real handler for NEON bounces.  NEON data-processing and
 * load/store instructions trap as "unknown" rather than as coprocessor
 * faults, so this handler filters by encoding before loading the VFP
 * state (NEON shares the VFP register file).
 */
static int
neon_handler(u_int address, u_int insn, trapframe_t *frame,
    int fault_code)
{
	struct cpu_info * const ci = curcpu();

	if (ci->ci_vfp_id == 0)
		/* No VFP detected, just fault. */
		return 1;

	/* NEON encodings occupy the 0xf2xxxxxx and 0xf4xxxxxx spaces. */
	if ((insn & 0xfe000000) != 0xf2000000
	    && (insn & 0xfe000000) != 0xf4000000)
		/* Not NEON instruction, just fault. */
		return 1;

	/* This shouldn't ever happen. */
	if (fault_code != FAULT_USER)
		panic("NEON fault in non-user mode");

	pcu_load(&arm_vfp_ops);

	/* Need to restart the faulted instruction. */
//	frame->tf_pc -= INSN_SIZE;
	return 0;
}
#endif
460
/*
 * PCU load callback: make the VFP usable for lwp `l` on this CPU.
 * With PCU_KERNEL, the VFP is being borrowed for in-kernel use and
 * only the low bank plus FPEXC bookkeeping is handled; otherwise the
 * lwp's full saved state is (re)loaded as needed.
 */
static void
vfp_state_load(lwp_t *l, u_int flags)
{
	struct pcb * const pcb = lwp_getpcb(l);

	KASSERT(flags & PCU_ENABLE);

	if (flags & PCU_KERNEL) {
		if ((flags & PCU_LOADED) == 0) {
			/* Stash the user FPEXC; we borrow that field. */
			pcb->pcb_kernel_vfp.vfp_fpexc = pcb->pcb_vfp.vfp_fpexc;
		}
		pcb->pcb_vfp.vfp_fpexc = VFP_FPEXC_EN;
		write_fpexc(pcb->pcb_vfp.vfp_fpexc);
		/*
		 * Load the kernel registers (just the first 16) if they've
		 * been used..
		 */
		if (flags & PCU_LOADED) {
			load_vfpregs_lo(pcb->pcb_kernel_vfp.vfp_regs);
		}
		return;
	}
	struct vfpreg * const fregs = &pcb->pcb_vfp;

	/*
	 * Instrument VFP usage -- if a process has not previously
	 * used the VFP, mark it as having used VFP for the first time,
	 * and count this event.
	 *
	 * If a process has used the VFP, count a "used VFP, and took
	 * a trap to use it again" event.
	 */
	if (__predict_false((l->l_md.md_flags & MDLWP_VFPUSED) == 0)) {
		vfpevent_use.ev_count++;
		l->l_md.md_flags |= MDLWP_VFPUSED;
		pcb->pcb_vfp.vfp_fpscr =
		    (VFP_FPSCR_DN | VFP_FPSCR_FZ);	/* Runfast */
	} else {
		vfpevent_reuse.ev_count++;
	}

	if (fregs->vfp_fpexc & VFP_FPEXC_EN) {
		/*
		 * If we think the VFP is enabled, it must have be disabled by
		 * vfp_state_release for another LWP so we can just restore
		 * FPEXC and return since our VFP state is still loaded.
		 */
		write_fpexc(fregs->vfp_fpexc);
		return;
	}

	/* Load and Enable the VFP (so that we can write the registers). */
	if (flags & PCU_RELOAD) {
		uint32_t fpexc = read_fpexc();
		KDASSERT((fpexc & VFP_FPEXC_EX) == 0);
		write_fpexc(fpexc | VFP_FPEXC_EN);

		load_vfpregs(fregs);
		write_fpscr(fregs->vfp_fpscr);

		if (fregs->vfp_fpexc & VFP_FPEXC_EX) {
			struct cpu_info * const ci = curcpu();
			/* Need to restore the exception handling state. */
			switch (ci->ci_vfp_id) {
			case FPU_VFP10_ARM10E:
			case FPU_VFP11_ARM11:
			case FPU_VFP_CORTEXA5:
			case FPU_VFP_CORTEXA7:
			case FPU_VFP_CORTEXA8:
			case FPU_VFP_CORTEXA9:
				write_fpinst2(fregs->vfp_fpinst2);
				write_fpinst(fregs->vfp_fpinst);
				break;
			default:
				panic("%s: Unsupported VFP %#x",
				    __func__, ci->ci_vfp_id);
			}
		}
	}

	/* Finally, restore the FPEXC but don't enable the VFP. */
	fregs->vfp_fpexc |= VFP_FPEXC_EN;
	write_fpexc(fregs->vfp_fpexc);
}
545
546 void
547 vfp_state_save(lwp_t *l, u_int flags)
548 {
549 struct pcb * const pcb = lwp_getpcb(l);
550 uint32_t fpexc = read_fpexc();
551 write_fpexc((fpexc | VFP_FPEXC_EN) & ~VFP_FPEXC_EX);
552
553 if (flags & PCU_KERNEL) {
554 /*
555 * Save the kernel set of VFP registers.
556 * (just the first 16).
557 */
558 save_vfpregs_lo(pcb->pcb_kernel_vfp.vfp_regs);
559 return;
560 }
561
562 struct vfpreg * const fregs = &pcb->pcb_vfp;
563
564 /*
565 * Enable the VFP (so we can read the registers).
566 * Make sure the exception bit is cleared so that we can
567 * safely dump the registers.
568 */
569 fregs->vfp_fpexc = fpexc;
570 if (fpexc & VFP_FPEXC_EX) {
571 struct cpu_info * const ci = curcpu();
572 /* Need to save the exception handling state */
573 switch (ci->ci_vfp_id) {
574 case FPU_VFP10_ARM10E:
575 case FPU_VFP11_ARM11:
576 case FPU_VFP_CORTEXA5:
577 case FPU_VFP_CORTEXA7:
578 case FPU_VFP_CORTEXA8:
579 case FPU_VFP_CORTEXA9:
580 fregs->vfp_fpinst = read_fpinst();
581 fregs->vfp_fpinst2 = read_fpinst2();
582 break;
583 default:
584 panic("%s: Unsupported VFP %#x",
585 __func__, ci->ci_vfp_id);
586 }
587 }
588 fregs->vfp_fpscr = read_fpscr();
589 save_vfpregs(fregs);
590
591 /* Disable the VFP. */
592 write_fpexc(fpexc);
593 }
594
595 void
596 vfp_state_release(lwp_t *l, u_int flags)
597 {
598 struct pcb * const pcb = lwp_getpcb(l);
599
600 if (flags & PCU_KERNEL) {
601 /*
602 * Restore the FPEXC since we borrowed that field.
603 */
604 pcb->pcb_vfp.vfp_fpexc = pcb->pcb_kernel_vfp.vfp_fpexc;
605 } else {
606 /*
607 * Now mark the VFP as disabled (and our state
608 * has been already saved or is being discarded).
609 */
610 pcb->pcb_vfp.vfp_fpexc &= ~VFP_FPEXC_EN;
611 }
612
613 /*
614 * Turn off the FPU so the next time a VFP instruction is issued
615 * an exception happens. We don't know if this LWP's state was
616 * loaded but if we turned off the FPU for some other LWP, when
617 * pcu_load invokes vfp_state_load it will see that VFP_FPEXC_EN
618 * is still set so it just restore fpexc and return since its
619 * contents are still sitting in the VFP.
620 */
621 write_fpexc(read_fpexc() & ~VFP_FPEXC_EN);
622 }
623
/* Force the current lwp's VFP state out to its pcb (via the PCU). */
void
vfp_savecontext(void)
{
	pcu_save(&arm_vfp_ops);
}
629
/* Discard the current lwp's live VFP state without saving it. */
void
vfp_discardcontext(void)
{
	pcu_discard(&arm_vfp_ops);
}
635
/*
 * Borrow the VFP for in-kernel use.  In interrupt context the PCU can't
 * be used, so the low register bank is stashed directly; otherwise the
 * normal PCU acquire path runs (which calls vfp_state_load/save).
 */
void
vfp_kernel_acquire(void)
{
	if (__predict_false(cpu_intr_p())) {
		write_fpexc(VFP_FPEXC_EN);
		if (curcpu()->ci_data.cpu_pcu_curlwp[PCU_FPU] != NULL) {
			lwp_t * const l = curlwp;
			struct pcb * const pcb = lwp_getpcb(l);
			KASSERT((l->l_md.md_flags & MDLWP_VFPINTR) == 0);
			l->l_md.md_flags |= MDLWP_VFPINTR;
			/*
			 * Stash d0-d15 in the upper half of the kernel
			 * scratch area (assumes vfp_regs has 32 slots,
			 * matching the &p[16] use in save_vfpregs_hi —
			 * the lower half belongs to the PCU_KERNEL path).
			 */
			save_vfpregs_lo(&pcb->pcb_kernel_vfp.vfp_regs[16]);
		}
	} else {
		pcu_kernel_acquire(&arm_vfp_ops);
	}
}
652
/*
 * Return the VFP after in-kernel use.  Undoes vfp_kernel_acquire():
 * in interrupt context, restore the stashed low bank and the lwp's
 * FPEXC; otherwise go through the normal PCU release path.
 */
void
vfp_kernel_release(void)
{
	if (__predict_false(cpu_intr_p())) {
		/* Default to leaving the VFP disabled. */
		uint32_t fpexc = 0;
		if (curcpu()->ci_data.cpu_pcu_curlwp[PCU_FPU] != NULL) {
			lwp_t * const l = curlwp;
			struct pcb * const pcb = lwp_getpcb(l);
			KASSERT(l->l_md.md_flags & MDLWP_VFPINTR);
			/* Restore d0-d15 from the interrupt stash. */
			load_vfpregs_lo(&pcb->pcb_kernel_vfp.vfp_regs[16]);
			l->l_md.md_flags &= ~MDLWP_VFPINTR;
			fpexc = pcb->pcb_vfp.vfp_fpexc;
		}
		write_fpexc(fpexc);
	} else {
		pcu_kernel_release(&arm_vfp_ops);
	}
}
671
/*
 * Copy lwp `l`'s VFP state (FPSCR + register file) into an mcontext,
 * saving live state first.  Sets _UC_FPU|_UC_ARM_VFP in *flagsp only
 * if the lwp has actually used the VFP.
 */
void
vfp_getcontext(struct lwp *l, mcontext_t *mcp, int *flagsp)
{
	if (l->l_md.md_flags & MDLWP_VFPUSED) {
		const struct pcb * const pcb = lwp_getpcb(l);
		/* Make sure the pcb copy is current before reading it. */
		pcu_save(&arm_vfp_ops);
		mcp->__fpu.__vfpregs.__vfp_fpscr = pcb->pcb_vfp.vfp_fpscr;
		memcpy(mcp->__fpu.__vfpregs.__vfp_fstmx, pcb->pcb_vfp.vfp_regs,
		    sizeof(mcp->__fpu.__vfpregs.__vfp_fstmx));
		*flagsp |= _UC_FPU|_UC_ARM_VFP;
	}
}
684
/*
 * Install the VFP state from an mcontext into lwp `l`'s pcb, after
 * discarding any live state, and mark the lwp as a VFP user.
 */
void
vfp_setcontext(struct lwp *l, const mcontext_t *mcp)
{
	pcu_discard(&arm_vfp_ops);
	struct pcb * const pcb = lwp_getpcb(l);
	l->l_md.md_flags |= MDLWP_VFPUSED;
	pcb->pcb_vfp.vfp_fpscr = mcp->__fpu.__vfpregs.__vfp_fpscr;
	memcpy(pcb->pcb_vfp.vfp_regs, mcp->__fpu.__vfpregs.__vfp_fstmx,
	    sizeof(mcp->__fpu.__vfpregs.__vfp_fstmx));
}
695
696 #endif /* FPU_VFP */
697