/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
29 1.1 matt
30 1.1 matt #include "opt_cputypes.h"
31 1.1 matt
32 1.1 matt #include <machine/asm.h>
33 1.1 matt #include "assym.h"
34 1.1 matt
35 1.2 matt RCSID("$NetBSD: pmap_vfp.S,v 1.2 2012/12/10 06:51:05 matt Exp $")
36 1.2 matt
37 1.2 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
38 1.2 matt
/*
 * This zeroes a page 64 bytes at a time.  64 is chosen over 32 since
 * 64 is the cache line size of the Cortex-A8.
 */
/*
 * void pmap_zero_page_vfp(paddr_t pa)
 *
 * Zero one page using the VFP register bank so each store writes a
 * full 64-byte cache line (d0-d7).  Only compiled when the kernel has
 * a direct physical->virtual map (__HAVE_MM_MD_DIRECT_MAPPED_PHYS),
 * since the physical address is translated with a constant delta.
 *
 * In:     r0 = physical address of the page to zero
 * Out:    nothing
 * Clobb:  r0, r2, r3, ip, flags.  d0-d7 are saved/restored on the
 *         stack; the caller's FPEXC is restored on exit.
 */
ENTRY(pmap_zero_page_vfp)
	ldr	ip, .Lkbase		@ ip = KERNEL_BASE phys
	ldr	r3, .Lkbase+4		@ r3 = KERNEL_BASE virt
	sub	r3, r3, ip		@ r3 = constant delta (virt - phys)
	add	r0, r0, r3		@ r0: phys -> direct-mapped virt
	mrc	p10, 7, r3, c8, c0, 0	@ r3 = FPEXC (vmrs r3, fpexc)
	orr	r2, r3, #VFP_FPEXC_EN
	mcr	p10, 7, r2, c8, c0, 0	@ enable the VFP unit
	vpush	{d0-d7}			@ preserve caller's d0-d7
#if (CPU_CORTEX == 0)
	mov	ip, #0			@ no NEON: zero d0 via its two
	vmov	s0, ip			@ 32-bit halves (s0/s1) ...
	vmov	s1, ip
	vmov.f64 d1, d0			@ ... then replicate d0 into d1-d7
	vmov.f64 d2, d0
	vmov.f64 d3, d0
	vmov.f64 d4, d0
	vmov.f64 d5, d0
	vmov.f64 d6, d0
	vmov.f64 d7, d0
#else
	veor	q0, q0, q0		@ NEON: q0-q3 alias d0-d7,
	veor	q1, q1, q1		@ so four veors zero all eight
	veor	q2, q2, q2
	veor	q3, q3, q3
#endif
	add	r2, r0, #PAGE_SIZE	@ r2 = first byte past the page
1:	vstmia	r0!, {d0-d7}		@ 4 x 64-byte stores = 256 bytes/iter
	vstmia	r0!, {d0-d7}
	vstmia	r0!, {d0-d7}
	vstmia	r0!, {d0-d7}
	cmp	r0, r2
	blt	1b			@ until the whole page is zeroed
	vpop	{d0-d7}			@ restore caller's d0-d7
	mcr	p10, 7, r3, c8, c0, 0	@ restore original FPEXC
	bx	lr
END(pmap_zero_page_vfp)
80 1.1 matt
/*
 * This copies a page 64 bytes at a time.  64 is chosen over 32 since
 * 64 is the cache line size of the Cortex-A8.
 */
/*
 * void pmap_copy_page_vfp(paddr_t src, paddr_t dst)
 *
 * Copy one page 64 bytes at a time through the VFP register bank
 * (d0-d7), software-prefetching 128 bytes ahead of the reads.  Only
 * compiled when the kernel has a direct physical->virtual map
 * (__HAVE_MM_MD_DIRECT_MAPPED_PHYS).
 *
 * In:     r0 = physical address of the source page
 *         r1 = physical address of the destination page
 * Out:    nothing
 * Clobb:  r0-r3, ip, flags.  d0-d7 are saved/restored on the stack;
 *         the caller's FPEXC is restored on exit.
 */
ENTRY(pmap_copy_page_vfp)
	ldr	ip, .Lkbase		@ ip = KERNEL_BASE phys
	ldr	r3, .Lkbase+4		@ r3 = KERNEL_BASE virt
	sub	r3, r3, ip		@ r3 = constant delta (virt - phys)
	add	r0, r0, r3		@ convert src from phys to virt
	add	r1, r1, r3		@ convert dst from phys to virt
	pld	[r0]			@ preload the first 128 bytes
	pld	[r0, #32]
	pld	[r0, #64]
	pld	[r0, #96]
	mrc	p10, 7, r3, c8, c0, 0	@ r3 = FPEXC (vmrs r3, fpexc)
	orr	r2, r3, #VFP_FPEXC_EN
	mcr	p10, 7, r2, c8, c0, 0	@ enable the VFP unit
	vpush	{d0-d7}			@ preserve caller's d0-d7
	add	r2, r0, #PAGE_SIZE-128	@ end mark 128 early: the final
					@ chunk is copied without prefetch
1:	pld	[r0, #128]		@ preload the next 128
	pld	[r0, #160]
	pld	[r0, #192]
	pld	[r0, #224]
2:	vldmia	r0!, {d0-d7}		@ read 0-63
	vstmia	r1!, {d0-d7}		@ write 0-63
	vldmia	r0!, {d0-d7}		@ read 64-127
	vstmia	r1!, {d0-d7}		@ write 64-127
	cmp	r0, r2
	blt	1b			@ more left: prefetch and copy again
	beq	2b			@ exactly 128 left: copy, no prefetch
	vpop	{d0-d7}			@ restore caller's d0-d7
	mcr	p10, 7, r3, c8, c0, 0	@ restore original FPEXC
	bx	lr
END(pmap_copy_page_vfp)
115 1.2 matt
	/*
	 * Literal pool: physical and virtual kernel base addresses,
	 * used above to form the constant phys->virt delta of the
	 * direct map.  Values come from assym.h / the kernel config.
	 */
	.p2align 2			@ .word literals must be 4-byte aligned
.Lkbase:
	.word	KERNEL_BASE_phys	@ physical kernel base
	.word	KERNEL_BASE_virt	@ matching virtual (direct-map) base
120 1.2 matt
121 1.2 matt #endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */
122