/*	$NetBSD: altivec_subr.S,v 1.3 2020/07/06 09:34:17 rin Exp $	*/
/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

RCSID("$NetBSD: altivec_subr.S,v 1.3 2020/07/06 09:34:17 rin Exp $")

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#endif

#ifdef ALTIVEC
/*
 * LINTSTUB: void vec_load_from_vreg(const struct vreg *vreg);
 */
ENTRY(vec_load_from_vreg)
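	/* r3 = vreg pointer (first argument per the PowerPC ELF ABI). */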
	/*
	 * Restore VSCR by first loading it into a vector register and then
	 * moving that into VSCR.  (This needs to be done before loading the
	 * user's vector registers, since we need a scratch vector register.)
	 */
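	/*
	 * lvewx fills only the word element selected by the effective
	 * address, so v0 is cleared first to leave the other elements
	 * zero before the move to VSCR.
	 */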
	vxor	%v0,%v0,%v0
	li	%r4,VREG_VSCR;	lvewx	%v0,%r3,%r4
	mtvscr	%v0

	/*
	 * Now load the vector registers.  Each group uses a different
	 * index register (r4-r7) so that, on a superscalar CPU, the
	 * independent li/lvx pairs can issue concurrently.
	 */
	li	%r4,VREG_V0;	lvx	%v0,%r3,%r4
	li	%r5,VREG_V1;	lvx	%v1,%r3,%r5
	li	%r6,VREG_V2;	lvx	%v2,%r3,%r6
	li	%r7,VREG_V3;	lvx	%v3,%r3,%r7

	li	%r4,VREG_V4;	lvx	%v4,%r3,%r4
	li	%r5,VREG_V5;	lvx	%v5,%r3,%r5
	li	%r6,VREG_V6;	lvx	%v6,%r3,%r6
	li	%r7,VREG_V7;	lvx	%v7,%r3,%r7

	li	%r4,VREG_V8;	lvx	%v8,%r3,%r4
	li	%r5,VREG_V9;	lvx	%v9,%r3,%r5
	li	%r6,VREG_V10;	lvx	%v10,%r3,%r6
	li	%r7,VREG_V11;	lvx	%v11,%r3,%r7

	li	%r4,VREG_V12;	lvx	%v12,%r3,%r4
	li	%r5,VREG_V13;	lvx	%v13,%r3,%r5
	li	%r6,VREG_V14;	lvx	%v14,%r3,%r6
	li	%r7,VREG_V15;	lvx	%v15,%r3,%r7

	li	%r4,VREG_V16;	lvx	%v16,%r3,%r4
	li	%r5,VREG_V17;	lvx	%v17,%r3,%r5
	li	%r6,VREG_V18;	lvx	%v18,%r3,%r6
	li	%r7,VREG_V19;	lvx	%v19,%r3,%r7

	li	%r4,VREG_V20;	lvx	%v20,%r3,%r4
	li	%r5,VREG_V21;	lvx	%v21,%r3,%r5
	li	%r6,VREG_V22;	lvx	%v22,%r3,%r6
	li	%r7,VREG_V23;	lvx	%v23,%r3,%r7

	li	%r4,VREG_V24;	lvx	%v24,%r3,%r4
	li	%r5,VREG_V25;	lvx	%v25,%r3,%r5
	li	%r6,VREG_V26;	lvx	%v26,%r3,%r6
	li	%r7,VREG_V27;	lvx	%v27,%r3,%r7

	li	%r4,VREG_V28;	lvx	%v28,%r3,%r4
	li	%r5,VREG_V29;	lvx	%v29,%r3,%r5
	li	%r6,VREG_V30;	lvx	%v30,%r3,%r6
	li	%r7,VREG_V31;	lvx	%v31,%r3,%r7

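	/*
	 * Context-synchronize so the restored vector state is established
	 * before we return.
	 */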
	isync
	blr
END(vec_load_from_vreg)

/*
 * LINTSTUB: void vec_unload_to_vreg(struct vreg *vreg);
 */
ENTRY(vec_unload_to_vreg)
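	/* r3 = vreg pointer to save into (first argument). */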
	/*
	 * Store the vector registers.  As above, each group uses a
	 * different index register (r4-r7) so that, on a superscalar CPU,
	 * the independent li/stvx pairs can issue concurrently.
	 */
	li	%r4,VREG_V0;	stvx	%v0,%r3,%r4
	li	%r5,VREG_V1;	stvx	%v1,%r3,%r5
	li	%r6,VREG_V2;	stvx	%v2,%r3,%r6
	li	%r7,VREG_V3;	stvx	%v3,%r3,%r7

	li	%r4,VREG_V4;	stvx	%v4,%r3,%r4
	li	%r5,VREG_V5;	stvx	%v5,%r3,%r5
	li	%r6,VREG_V6;	stvx	%v6,%r3,%r6
	li	%r7,VREG_V7;	stvx	%v7,%r3,%r7

	li	%r4,VREG_V8;	stvx	%v8,%r3,%r4
	li	%r5,VREG_V9;	stvx	%v9,%r3,%r5
	li	%r6,VREG_V10;	stvx	%v10,%r3,%r6
	li	%r7,VREG_V11;	stvx	%v11,%r3,%r7

	li	%r4,VREG_V12;	stvx	%v12,%r3,%r4
	li	%r5,VREG_V13;	stvx	%v13,%r3,%r5
	li	%r6,VREG_V14;	stvx	%v14,%r3,%r6
	li	%r7,VREG_V15;	stvx	%v15,%r3,%r7

	li	%r4,VREG_V16;	stvx	%v16,%r3,%r4
	li	%r5,VREG_V17;	stvx	%v17,%r3,%r5
	li	%r6,VREG_V18;	stvx	%v18,%r3,%r6
	li	%r7,VREG_V19;	stvx	%v19,%r3,%r7

	li	%r4,VREG_V20;	stvx	%v20,%r3,%r4
	li	%r5,VREG_V21;	stvx	%v21,%r3,%r5
	li	%r6,VREG_V22;	stvx	%v22,%r3,%r6
	li	%r7,VREG_V23;	stvx	%v23,%r3,%r7

	li	%r4,VREG_V24;	stvx	%v24,%r3,%r4
	li	%r5,VREG_V25;	stvx	%v25,%r3,%r5
	li	%r6,VREG_V26;	stvx	%v26,%r3,%r6
	li	%r7,VREG_V27;	stvx	%v27,%r3,%r7

	li	%r4,VREG_V28;	stvx	%v28,%r3,%r4
	li	%r5,VREG_V29;	stvx	%v29,%r3,%r5
	li	%r6,VREG_V30;	stvx	%v30,%r3,%r6
	li	%r7,VREG_V31;	stvx	%v31,%r3,%r7

	/*
	 * Save VSCR, remembering to restore the vector register used to
	 * save it.
	 */
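	/*
	 * mfvscr leaves VSCR in the low word of the target vector
	 * register; stvewx then stores just that word element.
	 */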
	mfvscr	%v31
	li	%r4,VREG_VSCR;	stvewx	%v31,%r3,%r4	/* low word only */

	lvx	%v31,%r3,%r7	/* restore v31 (r7 still holds VREG_V31) */

	isync
	blr
END(vec_unload_to_vreg)
#endif /* ALTIVEC */