/*	$NetBSD: sxreg.h,v 1.11.8.2 2014/05/22 11:40:08 yamt Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Michael Lorenz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* register definitions for Sun's SX / SPAM rendering engine */

#ifndef SXREG_H
#define SXREG_H

/* SX control registers */
#define SX_CONTROL_STATUS	0x00000000
#define SX_ERROR		0x00000004
#define SX_PAGE_BOUND_LOWER	0x00000008
#define SX_PAGE_BOUND_UPPER	0x0000000c
#define SX_PLANEMASK		0x00000010
#define SX_ROP_CONTROL		0x00000014	/* 8 bit ROP */
#define SX_IQ_OVERFLOW_COUNTER	0x00000018
#define SX_DIAGNOSTICS		0x0000001c
#define SX_INSTRUCTIONS		0x00000020
#define SX_ID			0x00000028
#define SX_R0_INIT		0x0000002c
#define SX_SOFTRESET		0x00000030
/* write registers directly, only when the processor is stopped */
#define SX_DIRECT_R0		0x00000100
#define SX_DIRECT_R1		0x00000104	/* and so on until R127 */
/* write registers via pseudo instructions */
#define SX_QUEUED_R0		0x00000300
#define SX_QUEUED_R1		0x00000304	/* and so on until R127 */
#define SX_QUEUED(r)		(0x300 + ((r) << 2))

/* special purpose registers */
#define R_ZERO	0
#define R_SCAM	1
#define R_MASK	2	/* bitmask for SX_STORE_SELECT */
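
/*
 * A usage sketch (illustrative only, not part of the hardware interface):
 * while the processor is running, registers are written through the queued
 * aliases, e.g. with a hypothetical bus_space-style accessor
 *
 *	sx_write(sc, SX_QUEUED(R_MASK), 0xffffffff);
 *
 * SX_QUEUED(2) expands to 0x308, the queued alias of the direct register
 * at 0x108.
 */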

/*
 * registers are repeated at 0x1000 with certain parts read only
 * (like the PAGE_BOUND registers) which userland has no business
 * writing to
 */

/* SX_CONTROL_STATUS */
#define SX_EE1		0x00000001	/* illegal instruction */
#define SX_EE2		0x00000002	/* page bound error */
#define SX_EE3		0x00000004	/* illegal memory access */
#define SX_EE4		0x00000008	/* illegal register access */
#define SX_EE5		0x00000010	/* alignment violation */
#define SX_EE6		0x00000020	/* illegal instruction queue write */
#define SX_EI		0x00000080	/* interrupt on error */
#define SX_PB		0x00001000	/* enable page bound checking */
#define SX_WO		0x00002000	/* write occurred (by SX) */
#define SX_GO		0x00004000	/* start/stop the processor */
#define SX_MT		0x00008000	/* instruction queue is empty */
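
/*
 * Typical start/wait sequence (a sketch; sx_read/sx_write stand in for
 * whatever register accessors the driver provides, they are not part of
 * this header):
 *
 *	sx_write(sc, SX_CONTROL_STATUS, SX_EI | SX_GO);
 *	... queue instructions ...
 *	while ((sx_read(sc, SX_CONTROL_STATUS) & SX_MT) == 0)
 *		continue;	wait for the queue to drain
 */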

/* SX_ERROR */
#define SX_SE1		0x00000001	/* illegal instruction */
#define SX_SE2		0x00000002	/* page bound error */
#define SX_SE3		0x00000004	/* illegal memory access */
#define SX_SE4		0x00000008	/* illegal register access */
#define SX_SE5		0x00000010	/* alignment violation */
#define SX_SE6		0x00000020	/* illegal instruction queue write */
#define SX_SI		0x00000080	/* interrupt on error */

/* SX_ID */
#define SX_ARCHITECTURE_MASK	0x000000ff
#define SX_CHIP_REVISION	0x0000ff00
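
/*
 * For example, given a value id read from SX_ID, the architecture is
 * (id & SX_ARCHITECTURE_MASK) and the chip revision is
 * ((id & SX_CHIP_REVISION) >> 8).
 */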

/* SX_DIAGNOSTICS */
#define SX_IQ_FIFO_ACCESS	0x00000001	/* allow memory instructions
						 * in SX_INSTRUCTIONS */

/*
 * memory referencing instructions are written to 0x800000000 + PA,
 * so we have to go through ASI 0x28 (ASI_BYPASS + 8)
 */
#define ASI_SX	0x28
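
/*
 * A sketch of how an instruction word reaches the chip from supervisor
 * mode (assumes a sparc v8 store-alternate; "insn" and "addr" are
 * placeholders, not definitions from this header):
 *
 *	__asm volatile("sta %0, [%1] 0x28" : : "r"(insn), "r"(addr));
 *
 * where addr is the low 32 bits of the physical address and the ASI
 * supplies the leading 0x8.
 */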

/* load / store instructions */
#define SX_STORE_COND	(0x4 << 19)	/* conditional write with mask */
#define SX_STORE_CLAMP	(0x2 << 19)
#define SX_STORE_MASK	(0x1 << 19)	/* apply plane mask */
#define SX_STORE_SELECT	(0x8 << 19)	/* expand with plane reg dest[0]/dest[1] */
#define SX_LOAD		(0xa << 19)
#define SX_STORE	(0x0 << 19)

/* data type */
#define SX_UBYTE_0	(0x00 << 14)
#define SX_UBYTE_8	(0x01 << 14)
#define SX_UBYTE_16	(0x02 << 14)
#define SX_UBYTE_24	(0x03 << 14)
#define SX_SBYTE_0	(0x04 << 14)
#define SX_SBYTE_8	(0x05 << 14)
#define SX_SBYTE_16	(0x06 << 14)
#define SX_SBYTE_24	(0x07 << 14)
#define SX_UQUAD_0	(0x08 << 14)
#define SX_UQUAD_8	(0x09 << 14)
#define SX_UQUAD_16	(0x0a << 14)
#define SX_UQUAD_24	(0x0b << 14)
#define SX_SQUAD_0	(0x0c << 14)
#define SX_SQUAD_8	(0x0d << 14)
#define SX_SQUAD_16	(0x0e << 14)
#define SX_SQUAD_24	(0x0f << 14)
#define SX_UCHAN_0	(0x10 << 14)
#define SX_UCHAN_8	(0x11 << 14)
#define SX_UCHAN_16	(0x12 << 14)
#define SX_UCHAN_24	(0x13 << 14)
#define SX_SCHAN_0	(0x14 << 14)
#define SX_SCHAN_8	(0x15 << 14)
#define SX_SCHAN_16	(0x16 << 14)
#define SX_SCHAN_24	(0x17 << 14)
#define SX_USHORT_0	(0x18 << 14)
#define SX_USHORT_8	(0x19 << 14)
#define SX_USHORT_16	(0x1a << 14)
#define SX_SSHORT_0	(0x1c << 14)
#define SX_SSHORT_8	(0x1d << 14)
#define SX_SSHORT_16	(0x1e << 14)
#define SX_LONG		(0x1b << 14)
#define SX_PACKED	(0x1f << 14)

#define SX_LD(dreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_LONG | ((dreg) << 7) | (o))
#define SX_LDB(dreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UBYTE_0 | ((dreg) << 7) | (o))
#define SX_LDP(dreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_PACKED | ((dreg) << 7) | (o))
#define SX_LDUQ0(dreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_0 | ((dreg) << 7) | (o))
#define SX_LDUQ8(dreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_8 | ((dreg) << 7) | (o))
#define SX_LDUQ16(dreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_16 | ((dreg) << 7) | (o))
#define SX_LDUQ24(dreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_LOAD | \
				SX_UQUAD_24 | ((dreg) << 7) | (o))
#define SX_ST(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_LONG | ((sreg) << 7) | (o))
#define SX_STM(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE_MASK | \
				SX_LONG | ((sreg) << 7) | (o))
#define SX_STB(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UBYTE_0 | ((sreg) << 7) | (o))
#define SX_STBC(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE_CLAMP | \
				SX_UBYTE_0 | ((sreg) << 7) | (o))
#define SX_STP(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_PACKED | ((sreg) << 7) | (o))
#define SX_STS(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE_SELECT | \
				SX_LONG | ((sreg) << 7) | (o))
#define SX_STBS(reg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE_SELECT | \
				SX_UBYTE_0 | ((reg) << 7) | (o))
#define SX_STUQ0(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_0 | ((sreg) << 7) | (o))
#define SX_STUQ0C(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE_CLAMP | \
				SX_UQUAD_0 | ((sreg) << 7) | (o))
#define SX_STUQ8(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_8 | ((sreg) << 7) | (o))
#define SX_STUQ16(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_16 | ((sreg) << 7) | (o))
#define SX_STUQ24(sreg, cnt, o)	(0x80000000 | ((cnt) << 23) | SX_STORE | \
				SX_UQUAD_24 | ((sreg) << 7) | (o))
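
/*
 * Example (purely mechanical, from the definitions above):
 * SX_LD(10, 3, 0x20) builds
 *
 *	0x80000000 | (3 << 23) | SX_LOAD | SX_LONG | (10 << 7) | 0x20
 *
 * i.e. a load of longwords into registers starting at R10, with count
 * field 3 and offset 0x20; the resulting word is then issued through
 * ASI_SX.
 */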

/* ROP and SELECT instructions */
#define SX_ROPB	(0x0 << 21)	/* mask bits apply to bytes */
#define SX_ROPM	(0x1 << 21)	/* mask bits apply to each bit */
#define SX_ROPL	(0x2 << 21)	/* mask bits apply per register */
#define SX_SELB	(0x4 << 21)	/* byte select scalar */
#define SX_SELV	(0x6 << 21)	/* register select vector */
#define SX_SELS	(0x7 << 21)	/* register select scalar */

#define SX_ROP(sa, sb, d, cnt)	(0x90000000 | ((cnt) << 24) | SX_ROPL | \
				((sa) << 14) | (sb) | ((d) << 7))
#define SX_SELECT_S(sa, sb, d, cnt)	(0x90000000 | ((cnt) << 24) | SX_SELS | \
				((sa) << 14) | (sb) | ((d) << 7))
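
/*
 * Example (illustrative): SX_ROP(8, 16, 24, n) combines the operand
 * vectors starting at R8 and R16 into results starting at R24, using the
 * raster operation currently programmed into SX_ROP_CONTROL; SX_SELECT_S
 * is encoded the same way with SX_SELS in place of SX_ROPL.
 */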

/* multiply group */
#define SX_M16X16SR0	(0x0 << 28)	/* 16bit multiply, no shift */
#define SX_M16X16SR8	(0x1 << 28)	/* 16bit multiply, shift right 8 */
#define SX_M16X16SR16	(0x2 << 28)	/* 16bit multiply, shift right 16 */
#define SX_M32X16SR0	(0x4 << 28)	/* 32x16bit multiply, no shift */
#define SX_M32X16SR8	(0x5 << 28)	/* 32x16bit multiply, shift right 8 */
#define SX_M32X16SR16	(0x6 << 28)	/* 32x16bit multiply, shift right 16 */

#define SX_MULTIPLY	(0x0 << 21)	/* normal multiplication */
#define SX_DOT		(0x1 << 21)	/* dot product of A and B */
#define SX_SAXP		(0x2 << 21)	/* A * SCAM + B */

#define SX_ROUND	(0x1 << 23)	/* round results */

#define SX_MUL16X16(sa, sb, d, cnt)	(SX_M16X16SR0 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_MUL16X16R(sa, sb, d, cnt)	(SX_M16X16SR0 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
#define SX_MUL16X16SR8(sa, sb, d, cnt)	(SX_M16X16SR8 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_MUL16X16SR8R(sa, sb, d, cnt)	(SX_M16X16SR8 | ((cnt) << 24) | \
		SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)

#define SX_SAXP16X16(sa, sb, d, cnt)	(SX_M16X16SR0 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_SAXP16X16R(sa, sb, d, cnt)	(SX_M16X16SR0 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
#define SX_SAXP16X16SR8(sa, sb, d, cnt)	(SX_M16X16SR8 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
#define SX_SAXP16X16SR8R(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
		SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
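
/*
 * Example (illustrative): a natural use of SAXP is alpha blending.  With
 * the blend factor loaded into R_SCAM, something like
 *
 *	SX_SAXP16X16SR8R(src, dst, dst, cnt)
 *
 * computes roughly dst = ((src * SCAM) >> 8) + dst, rounded, over cnt
 * elements.
 */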

/* logic group */
#define SX_AND_V	(0x0 << 21)	/* vector AND vector */
#define SX_AND_S	(0x1 << 21)	/* vector AND scalar */
#define SX_AND_I	(0x2 << 21)	/* vector AND immediate */
#define SX_XOR_V	(0x3 << 21)	/* vector XOR vector */
#define SX_XOR_S	(0x4 << 21)	/* vector XOR scalar */
#define SX_XOR_I	(0x5 << 21)	/* vector XOR immediate */
#define SX_OR_V		(0x6 << 21)	/* vector OR vector */
#define SX_OR_S		(0x7 << 21)	/* vector OR scalar */
/* immediates are 7 bit, sign-extended to 32 bit */

#define SX_ANDV(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_AND_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ANDS(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_AND_S | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ANDI(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_AND_I | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORV(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_XOR_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORS(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_XOR_S | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_XORI(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_XOR_I | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ORV(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_OR_V | \
		((sa) << 14) | ((d) << 7) | (sb))
#define SX_ORS(sa, sb, d, cnt)	(0xb0000000 | ((cnt) << 24) | SX_OR_S | \
		((sa) << 14) | ((d) << 7) | (sb))
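
/*
 * Example (mechanical, from the definitions above): SX_ANDI(8, 0x3f, 8, n)
 * ANDs each of the n registers starting at R8 with the immediate 0x3f in
 * place; the immediate occupies the sb field and is sign-extended as
 * noted above.
 */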

/* arithmetic group */
#define SX_ADD_V	(0x00 << 21)	/* vector + vector */
#define SX_ADD_S	(0x01 << 21)	/* vector + scalar */
#define SX_ADD_I	(0x02 << 21)	/* vector + immediate */
#define SX_SUM		(0x03 << 21)	/* sum of vector and scalar */
#define SX_SUB_V	(0x04 << 21)	/* vector - vector */
#define SX_SUB_S	(0x05 << 21)	/* vector - scalar */
#define SX_SUB_I	(0x06 << 21)	/* vector - immediate */
#define SX_ABS		(0x07 << 21)	/* abs(sb) with sa = R0 */
/* the hardware does sa - sb for sb < 0 and sa + sb for sb > 0 */

#define SX_ADDV(sa, sb, d, cnt)	(0xa0000000 | ((cnt) << 24) | SX_ADD_V | \
		((sa) << 14) | ((d) << 7) | (sb))
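
/*
 * Example (illustrative): SX_ADDV(8, 16, 24, n) encodes an element-wise
 * add of the vectors at R8 and R16 into R24; the other operations follow
 * the same 0xa0000000-group pattern with the opcode field swapped, e.g.
 * SX_SUB_I with the immediate in the sb field.
 */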

#endif /* SXREG_H */