/*	$NetBSD: altivec.c,v 1.2 2003/03/05 05:27:25 matt Exp $	*/

/*
 * Copyright (C) 1996 Wolfgang Solfrank.
 * Copyright (C) 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <powerpc/altivec.h>
#include <powerpc/spr.h>
#include <powerpc/psl.h>

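/*
 * Load the current LWP's AltiVec context into this CPU's vector unit,
 * presumably in response to an AltiVec-unavailable exception.  Any
 * context already resident on this CPU is flushed to its owner's PCB
 * first, and ownership of the unit is then handed to curlwp.
 */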
void
enable_vec(void)
{
	struct cpu_info *ci = curcpu();
	struct lwp *l = curlwp;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf = trapframe(l);
	struct vreg *vr = &pcb->pcb_vr;
	register_t msr;

	KASSERT(pcb->pcb_veccpu == NULL);

	pcb->pcb_flags |= PCB_ALTIVEC;

	/*
	 * Enable AltiVec temporarily (and disable interrupts).
	 */
	msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_VEC);
	__asm __volatile ("isync");
	if (ci->ci_veclwp) {
		save_vec_cpu();
	}
	KASSERT(curcpu()->ci_veclwp == NULL);

	/*
	 * Restore VSCR by first loading it into a vector and then into VSCR.
	 * (This needs to be done before loading the user's vector registers
	 * since we need to use a scratch vector register.)
	 */
	__asm __volatile("vxor %2,%2,%2; lvewx %2,%0,%1; mtvscr %2" \
	    :: "b"(vr), "r"(offsetof(struct vreg, vscr)), "n"(0));

	/*
	 * VRSAVE will be restored when the trap frame returns.
	 */
	tf->tf_xtra[TF_VRSAVE] = vr->vrsave;

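	/*
	 * LVX(n,vr) loads vector register n from its slot in the save
	 * area: "vr" supplies the base address, the slot's offset within
	 * struct vreg is passed in a register, and the "n" constraint
	 * substitutes the constant register number into the instruction.
	 */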
#define	LVX(n,vr)	__asm /*__volatile*/("lvx %2,%0,%1" \
	    :: "b"(vr), "r"(offsetof(struct vreg, vreg[n])), "n"(n));

	/*
	 * Load all 32 vector registers
	 */
	LVX( 0,vr);	LVX( 1,vr);	LVX( 2,vr);	LVX( 3,vr);
	LVX( 4,vr);	LVX( 5,vr);	LVX( 6,vr);	LVX( 7,vr);
	LVX( 8,vr);	LVX( 9,vr);	LVX(10,vr);	LVX(11,vr);
	LVX(12,vr);	LVX(13,vr);	LVX(14,vr);	LVX(15,vr);

	LVX(16,vr);	LVX(17,vr);	LVX(18,vr);	LVX(19,vr);
	LVX(20,vr);	LVX(21,vr);	LVX(22,vr);	LVX(23,vr);
	LVX(24,vr);	LVX(25,vr);	LVX(26,vr);	LVX(27,vr);
	LVX(28,vr);	LVX(29,vr);	LVX(30,vr);	LVX(31,vr);
	__asm __volatile ("isync");

	/*
	 * Enable AltiVec when we return to user-mode.
	 * Record the new ownership of the AltiVec unit.
	 */
	tf->srr1 |= PSL_VEC;
	curcpu()->ci_veclwp = l;
	pcb->pcb_veccpu = curcpu();
	__asm __volatile ("sync");

	/*
	 * Restore MSR (turn off AltiVec)
	 */
	mtmsr(msr);
}

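/*
 * Save the AltiVec context currently resident on this CPU, if any,
 * into its owning LWP's PCB and release the vector unit.
 */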
void
save_vec_cpu(void)
{
	struct cpu_info *ci = curcpu();
	struct lwp *l;
	struct pcb *pcb;
	struct vreg *vr;
	struct trapframe *tf;
	register_t msr;

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_VEC);
	__asm __volatile ("isync");
	l = ci->ci_veclwp;
	if (l == NULL) {
		goto out;
	}
	pcb = &l->l_addr->u_pcb;
	vr = &pcb->pcb_vr;
	tf = trapframe(l);

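	/*
	 * STVX(n,vr) is the store-side counterpart of the LVX macro
	 * above: it writes vector register n to its slot in the save
	 * area at "vr".
	 */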
#define	STVX(n,vr)	__asm /*__volatile*/("stvx %2,%0,%1" \
	    :: "b"(vr), "r"(offsetof(struct vreg, vreg[n])), "n"(n));

	/*
	 * Save the vector registers.
	 */
	STVX( 0,vr);	STVX( 1,vr);	STVX( 2,vr);	STVX( 3,vr);
	STVX( 4,vr);	STVX( 5,vr);	STVX( 6,vr);	STVX( 7,vr);
	STVX( 8,vr);	STVX( 9,vr);	STVX(10,vr);	STVX(11,vr);
	STVX(12,vr);	STVX(13,vr);	STVX(14,vr);	STVX(15,vr);

	STVX(16,vr);	STVX(17,vr);	STVX(18,vr);	STVX(19,vr);
	STVX(20,vr);	STVX(21,vr);	STVX(22,vr);	STVX(23,vr);
	STVX(24,vr);	STVX(25,vr);	STVX(26,vr);	STVX(27,vr);
	STVX(28,vr);	STVX(29,vr);	STVX(30,vr);	STVX(31,vr);

	/*
	 * Save VSCR (this needs to be done after saving the vector
	 * registers since we need to use one as scratch).
	 */
	__asm __volatile("mfvscr %2; stvewx %2,%0,%1" \
	    :: "b"(vr), "r"(offsetof(struct vreg, vscr)), "n"(0));

	/*
	 * Save VRSAVE
	 */
	vr->vrsave = tf->tf_xtra[TF_VRSAVE];

	/*
	 * Note that we aren't using any CPU resources and stop any
	 * data streams.
	 */
	tf->srr1 &= ~PSL_VEC;
	pcb->pcb_veccpu = NULL;
	ci->ci_veclwp = NULL;
	__asm __volatile ("dssall; sync");

 out:

	/*
	 * Restore MSR (turn off AltiVec)
	 */
	mtmsr(msr);
}

/*
 * Save an LWP's AltiVec state to its PCB.  The state may be in any CPU.
 * The LWP must either be curlwp or traced by curproc (and stopped).
 * (The point being that the LWP must not run on another CPU during
 * this function.)
 */
void
save_vec_lwp(struct lwp *l)
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct cpu_info *ci = curcpu();

	/*
	 * If it's already in the PCB, there's nothing to do.
	 */
	if (pcb->pcb_veccpu == NULL) {
		return;
	}

	/*
	 * If the state is in the current CPU, just flush the current CPU's
	 * state.
	 */
	if (l == ci->ci_veclwp) {
		save_vec_cpu();
		return;
	}

#ifdef MULTIPROCESSOR
	/*
	 * It must be on another CPU, flush it from there.
	 */
	mp_save_vec_lwp(l);
#endif
}

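/*
 * Scratch vector register used by vzeropage().  It is saved and
 * restored around its use, so the choice of v19 is largely arbitrary.
 */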
#define ZERO_VEC	19

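/*
 * Zero the physical page at "pa" using AltiVec stores.  Since "pa" is
 * a physical address, data translation is turned off around the loop
 * so the stores go directly to physical memory.
 */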
void
vzeropage(paddr_t pa)
{
	const paddr_t ea = pa + NBPG;
	uint32_t vec[7], *vp = (void *) roundup((uintptr_t) vec, 16);
	register_t omsr, msr;

	__asm __volatile("mfmsr %0" : "=r"(omsr) :);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	msr = (omsr & ~PSL_EE) | PSL_VEC;
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Save the VEC register we are going to use before we disable
	 * relocation.
	 */
	__asm("stvx %1,0,%0" :: "r"(vp), "n"(ZERO_VEC));
	__asm("vxor %0,%0,%0" :: "n"(ZERO_VEC));

	/*
	 * Turn off data relocation (DMMU off).
	 */
	msr &= ~PSL_DR;
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Zero the page using a single cache line.
	 */
	do {
		__asm("stvx %2,%0,%1" :: "b"(pa), "r"( 0), "n"(ZERO_VEC));
		__asm("stvxl %2,%0,%1" :: "b"(pa), "r"(16), "n"(ZERO_VEC));
		__asm("stvx %2,%0,%1" :: "b"(pa), "r"(32), "n"(ZERO_VEC));
		__asm("stvxl %2,%0,%1" :: "b"(pa), "r"(48), "n"(ZERO_VEC));
		pa += 64;
	} while (pa < ea);

	/*
	 * Restore data relocation (DMMU on).
	 */
	msr |= PSL_DR;
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Restore VEC register (now that we can access the stack again).
	 */
	__asm("lvx %1,0,%0" :: "r"(vp), "n"(ZERO_VEC));

	/*
	 * Restore old MSR (AltiVec OFF).
	 */
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(omsr));
}

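/*
 * Scratch vector registers used by vcopypage(); two registers let each
 * loop iteration move a 32-byte chunk.
 */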
#define LO_VEC	16
#define HI_VEC	17

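/*
 * Copy the physical page at "src" to the physical page at "dst" using
 * AltiVec loads and stores, again with data translation disabled since
 * both arguments are physical addresses.
 */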
void
vcopypage(paddr_t dst, paddr_t src)
{
	const paddr_t edst = dst + NBPG;
	uint32_t vec[11], *vp = (void *) roundup((uintptr_t) vec, 16);
	register_t omsr, msr;

	__asm __volatile("mfmsr %0" : "=r"(omsr) :);

	/*
	 * Turn on AltiVec, turn off interrupts.
	 */
	msr = (omsr & ~PSL_EE) | PSL_VEC;
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Save the VEC registers we will be using before we disable
	 * relocation.
	 */
	__asm("stvx %2,%1,%0" :: "b"(vp), "r"( 0), "n"(LO_VEC));
	__asm("stvx %2,%1,%0" :: "b"(vp), "r"(16), "n"(HI_VEC));

	/*
	 * Turn off data relocation (DMMU off).
	 */
	msr &= ~PSL_DR;
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Copy the page using a single cache line.  On most PPCs, two
	 * vector registers occupy one cache line.
	 */
	do {
		__asm("lvx %2,%0,%1" :: "b"(src), "r"( 0), "n"(LO_VEC));
		__asm("stvx %2,%0,%1" :: "b"(dst), "r"( 0), "n"(LO_VEC));
		__asm("lvxl %2,%0,%1" :: "b"(src), "r"(16), "n"(HI_VEC));
		__asm("stvxl %2,%0,%1" :: "b"(dst), "r"(16), "n"(HI_VEC));
		src += 32;
		dst += 32;
	} while (dst < edst);

	/*
	 * Restore data relocation (DMMU on).
	 */
	msr |= PSL_DR;
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(msr));

	/*
	 * Restore VEC registers (now that we can access the stack again).
	 */
	__asm("lvx %2,%1,%0" :: "b"(vp), "r"( 0), "n"(LO_VEC));
	__asm("lvx %2,%1,%0" :: "b"(vp), "r"(16), "n"(HI_VEC));

	/*
	 * Restore old MSR (AltiVec OFF).
	 */
	__asm __volatile("sync; mtmsr %0; isync" :: "r"(omsr));
}